author    Ben Noordhuis <info@bnoordhuis.nl>  2013-07-02 17:11:31 +0200
committer Ben Noordhuis <info@bnoordhuis.nl>  2013-07-06 16:53:06 +0200
commit    704fd8f3745527fc080f96e54e5ec1857c505399 (patch)
tree      bff68e8a731f3618d3e8f1708aa9de194bc1f612 /deps/v8/src
parent    eec43351c44c0bec31a83e1a28be15e30722936a (diff)
download  node-new-704fd8f3745527fc080f96e54e5ec1857c505399.tar.gz
v8: upgrade to v3.20.2
Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/api.cc  627
-rw-r--r--  deps/v8/src/api.h  78
-rw-r--r--  deps/v8/src/arguments.cc  14
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h  29
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc  2
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc  431
-rwxr-xr-x [-rw-r--r--]  deps/v8/src/arm/code-stubs-arm.cc  353
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h  1
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc  2
-rw-r--r--  deps/v8/src/arm/constants-arm.cc  2
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc  2
-rw-r--r--  deps/v8/src/arm/debug-arm.cc  2
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc  2
-rw-r--r--  deps/v8/src/arm/frames-arm.cc  9
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc  220
-rw-r--r--  deps/v8/src/arm/ic-arm.cc  9
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc  96
-rw-r--r--  deps/v8/src/arm/lithium-arm.h  92
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc  491
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h  22
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  51
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h  4
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc  3
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc  67
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc  180
-rw-r--r--  deps/v8/src/arraybuffer.js  11
-rw-r--r--  deps/v8/src/assembler.cc  10
-rw-r--r--  deps/v8/src/assembler.h  30
-rw-r--r--  deps/v8/src/assert-scope.h  29
-rw-r--r--  deps/v8/src/ast.cc  108
-rw-r--r--  deps/v8/src/ast.h  113
-rw-r--r--  deps/v8/src/atomicops.h  10
-rw-r--r--  deps/v8/src/atomicops_internals_tsan.h  249
-rw-r--r--  deps/v8/src/bootstrapper.cc  88
-rw-r--r--  deps/v8/src/builtins.cc  63
-rw-r--r--  deps/v8/src/builtins.h  7
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc  60
-rw-r--r--  deps/v8/src/code-stubs.cc  68
-rw-r--r--  deps/v8/src/code-stubs.h  157
-rw-r--r--  deps/v8/src/codegen.cc  3
-rw-r--r--  deps/v8/src/collection.js  13
-rw-r--r--  deps/v8/src/compiler.cc  113
-rw-r--r--  deps/v8/src/compiler.h  95
-rw-r--r--  deps/v8/src/contexts.h  2
-rw-r--r--  deps/v8/src/cpu-profiler-inl.h  21
-rw-r--r--  deps/v8/src/cpu-profiler.cc  364
-rw-r--r--  deps/v8/src/cpu-profiler.h  77
-rw-r--r--  deps/v8/src/d8-debug.cc  6
-rw-r--r--  deps/v8/src/d8-readline.cc  15
-rw-r--r--  deps/v8/src/d8.cc  11
-rw-r--r--  deps/v8/src/d8.gyp  3
-rw-r--r--  deps/v8/src/d8.js  2
-rw-r--r--  deps/v8/src/data-flow.h  2
-rw-r--r--  deps/v8/src/debug-debugger.js  45
-rw-r--r--  deps/v8/src/debug.cc  104
-rw-r--r--  deps/v8/src/debug.h  18
-rw-r--r--  deps/v8/src/deoptimizer.cc  369
-rw-r--r--  deps/v8/src/deoptimizer.h  34
-rw-r--r--  deps/v8/src/execution.h  2
-rw-r--r--  deps/v8/src/extensions/i18n/break-iterator.cc  331
-rw-r--r--  deps/v8/src/extensions/i18n/break-iterator.h  85
-rw-r--r--  deps/v8/src/extensions/i18n/break-iterator.js  197
-rw-r--r--  deps/v8/src/extensions/i18n/collator.cc  363
-rw-r--r--  deps/v8/src/extensions/i18n/collator.h  68
-rw-r--r--  deps/v8/src/extensions/i18n/collator.js  212
-rw-r--r--  deps/v8/src/extensions/i18n/date-format.cc  329
-rw-r--r--  deps/v8/src/extensions/i18n/date-format.h  71
-rw-r--r--  deps/v8/src/extensions/i18n/date-format.js  478
-rw-r--r--  deps/v8/src/extensions/i18n/footer.js  40
-rw-r--r--  deps/v8/src/extensions/i18n/globals.js  168
-rw-r--r--  deps/v8/src/extensions/i18n/header.js  41
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-extension.cc  116
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-extension.h  51
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-utils.cc  174
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-utils.h  91
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-utils.js  541
-rw-r--r--  deps/v8/src/extensions/i18n/locale.cc  248
-rw-r--r--  deps/v8/src/extensions/i18n/locale.h  56
-rw-r--r--  deps/v8/src/extensions/i18n/locale.js  192
-rw-r--r--  deps/v8/src/extensions/i18n/number-format.cc  418
-rw-r--r--  deps/v8/src/extensions/i18n/number-format.h  69
-rw-r--r--  deps/v8/src/extensions/i18n/number-format.js  295
-rw-r--r--  deps/v8/src/extensions/i18n/overrides.js  220
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc  6
-rw-r--r--  deps/v8/src/factory.cc  74
-rw-r--r--  deps/v8/src/factory.h  11
-rw-r--r--  deps/v8/src/flag-definitions.h  22
-rw-r--r--  deps/v8/src/flags.cc  4
-rw-r--r--  deps/v8/src/frames-inl.h  86
-rw-r--r--  deps/v8/src/frames.cc  308
-rw-r--r--  deps/v8/src/frames.h  242
-rw-r--r--  deps/v8/src/full-codegen.cc  25
-rw-r--r--  deps/v8/src/full-codegen.h  31
-rw-r--r--  deps/v8/src/gdb-jit.cc  119
-rw-r--r--  deps/v8/src/gdb-jit.h  2
-rw-r--r--  deps/v8/src/generator.js  19
-rw-r--r--  deps/v8/src/global-handles.h  2
-rw-r--r--  deps/v8/src/globals.h  43
-rw-r--r--  deps/v8/src/handles.cc  23
-rw-r--r--  deps/v8/src/handles.h  3
-rw-r--r--  deps/v8/src/heap-inl.h  33
-rw-r--r--  deps/v8/src/heap-profiler.cc  5
-rw-r--r--  deps/v8/src/heap-profiler.h  2
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc  156
-rw-r--r--  deps/v8/src/heap-snapshot-generator.h  19
-rw-r--r--  deps/v8/src/heap.cc  460
-rw-r--r--  deps/v8/src/heap.h  42
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.cc  120
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.h  26
-rw-r--r--  deps/v8/src/hydrogen-escape-analysis.cc  66
-rw-r--r--  deps/v8/src/hydrogen-escape-analysis.h  57
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc  61
-rw-r--r--  deps/v8/src/hydrogen-gvn.h  28
-rw-r--r--  deps/v8/src/hydrogen-infer-representation.cc  172
-rw-r--r--  deps/v8/src/hydrogen-infer-representation.h  57
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc  205
-rw-r--r--  deps/v8/src/hydrogen-instructions.h  268
-rw-r--r--  deps/v8/src/hydrogen-osr.cc  123
-rw-r--r--  deps/v8/src/hydrogen-osr.h  70
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.cc  169
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.h  61
-rw-r--r--  deps/v8/src/hydrogen-uint32-analysis.cc  231
-rw-r--r--  deps/v8/src/hydrogen-uint32-analysis.h  59
-rw-r--r--  deps/v8/src/hydrogen.cc  2441
-rw-r--r--  deps/v8/src/hydrogen.h  249
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h  29
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h  4
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc  501
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc  321
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/cpu-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc  9
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc  220
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc  9
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc  510
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h  22
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc  114
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h  100
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc  49
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h  11
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc  3
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc  183
-rw-r--r--  deps/v8/src/ic.cc  272
-rw-r--r--  deps/v8/src/ic.h  34
-rw-r--r--  deps/v8/src/incremental-marking.cc  3
-rw-r--r--  deps/v8/src/isolate.cc  133
-rw-r--r--  deps/v8/src/isolate.h  53
-rw-r--r--  deps/v8/src/json-parser.h  53
-rw-r--r--  deps/v8/src/jsregexp.cc  14
-rw-r--r--  deps/v8/src/jsregexp.h  3
-rw-r--r--  deps/v8/src/lithium-allocator-inl.h  5
-rw-r--r--  deps/v8/src/lithium-allocator.cc  219
-rw-r--r--  deps/v8/src/lithium-allocator.h  21
-rw-r--r--  deps/v8/src/lithium.cc  9
-rw-r--r--  deps/v8/src/lithium.h  51
-rw-r--r--  deps/v8/src/liveedit.cc  19
-rw-r--r--  deps/v8/src/liveedit.h  4
-rw-r--r--  deps/v8/src/log-inl.h  1
-rw-r--r--  deps/v8/src/log-utils.cc  12
-rw-r--r--  deps/v8/src/log-utils.h  3
-rw-r--r--  deps/v8/src/log.cc  334
-rw-r--r--  deps/v8/src/log.h  17
-rw-r--r--  deps/v8/src/macros.py  2
-rw-r--r--  deps/v8/src/mark-compact.cc  220
-rw-r--r--  deps/v8/src/mark-compact.h  9
-rw-r--r--  deps/v8/src/marking-thread.cc  1
-rw-r--r--  deps/v8/src/messages.js  13
-rw-r--r--  deps/v8/src/mips/OWNERS  1
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h  29
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc  4
-rwxr-xr-x [-rw-r--r--]  deps/v8/src/mips/builtins-mips.cc  447
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc  386
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h  2
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc  2
-rw-r--r--  deps/v8/src/mips/constants-mips.cc  2
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc  2
-rw-r--r--  deps/v8/src/mips/debug-mips.cc  2
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc  2
-rw-r--r--  deps/v8/src/mips/frames-mips.cc  12
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc  226
-rw-r--r--  deps/v8/src/mips/ic-mips.cc  9
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc  705
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h  38
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc  149
-rw-r--r--  deps/v8/src/mips/lithium-mips.h  114
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc  48
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h  5
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc  2
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc  57
-rw-r--r--  deps/v8/src/mips/simulator-mips.h  2
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc  183
-rw-r--r--  deps/v8/src/mirror-debugger.js  59
-rw-r--r--  deps/v8/src/mksnapshot.cc  18
-rw-r--r--  deps/v8/src/natives.h  3
-rw-r--r--  deps/v8/src/objects-debug.cc  36
-rw-r--r--  deps/v8/src/objects-inl.h  221
-rw-r--r--  deps/v8/src/objects-printer.cc  46
-rw-r--r--  deps/v8/src/objects-visiting-inl.h  108
-rw-r--r--  deps/v8/src/objects-visiting.cc  8
-rw-r--r--  deps/v8/src/objects-visiting.h  7
-rw-r--r--  deps/v8/src/objects.cc  945
-rw-r--r--  deps/v8/src/objects.h  389
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.cc  37
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.h  10
-rw-r--r--  deps/v8/src/parser.cc  71
-rw-r--r--  deps/v8/src/parser.h  10
-rw-r--r--  deps/v8/src/platform-cygwin.cc  6
-rw-r--r--  deps/v8/src/platform-freebsd.cc  6
-rw-r--r--  deps/v8/src/platform-linux.cc  15
-rw-r--r--  deps/v8/src/platform-macos.cc  8
-rw-r--r--  deps/v8/src/platform-openbsd.cc  9
-rw-r--r--  deps/v8/src/platform-posix.cc  23
-rw-r--r--  deps/v8/src/platform-solaris.cc  6
-rw-r--r--  deps/v8/src/platform-tls-mac.h  4
-rw-r--r--  deps/v8/src/platform-win32.cc  10
-rw-r--r--  deps/v8/src/platform.h  3
-rw-r--r--  deps/v8/src/preparser.cc  16
-rw-r--r--  deps/v8/src/preparser.h  2
-rw-r--r--  deps/v8/src/prettyprinter.cc  12
-rw-r--r--  deps/v8/src/profile-generator-inl.h  8
-rw-r--r--  deps/v8/src/profile-generator.cc  159
-rw-r--r--  deps/v8/src/profile-generator.h  48
-rw-r--r--  deps/v8/src/property-details.h  4
-rw-r--r--  deps/v8/src/property.h  9
-rw-r--r--  deps/v8/src/runtime-profiler.cc  55
-rw-r--r--  deps/v8/src/runtime-profiler.h  2
-rw-r--r--  deps/v8/src/runtime.cc  539
-rw-r--r--  deps/v8/src/runtime.h  37
-rw-r--r--  deps/v8/src/runtime.js  7
-rw-r--r--  deps/v8/src/sampler.cc  203
-rw-r--r--  deps/v8/src/sampler.h  29
-rw-r--r--  deps/v8/src/serialize.cc  14
-rw-r--r--  deps/v8/src/serialize.h  4
-rw-r--r--  deps/v8/src/snapshot-common.cc  10
-rw-r--r--  deps/v8/src/snapshot-empty.cc  2
-rw-r--r--  deps/v8/src/snapshot.h  2
-rw-r--r--  deps/v8/src/spaces.cc  117
-rw-r--r--  deps/v8/src/spaces.h  39
-rw-r--r--  deps/v8/src/store-buffer.cc  22
-rw-r--r--  deps/v8/src/store-buffer.h  3
-rw-r--r--  deps/v8/src/string-stream.cc  2
-rw-r--r--  deps/v8/src/string.js  56
-rw-r--r--  deps/v8/src/strtod.cc  2
-rw-r--r--  deps/v8/src/stub-cache.cc  61
-rw-r--r--  deps/v8/src/stub-cache.h  36
-rw-r--r--  deps/v8/src/sweeper-thread.cc  1
-rw-r--r--  deps/v8/src/third_party/vtune/v8vtune.gyp  3
-rw-r--r--  deps/v8/src/third_party/vtune/vtune-jit.cc  3
-rw-r--r--  deps/v8/src/type-info.cc  256
-rw-r--r--  deps/v8/src/type-info.h  47
-rw-r--r--  deps/v8/src/typedarray.js  288
-rw-r--r--  deps/v8/src/types.cc  224
-rw-r--r--  deps/v8/src/types.h  109
-rw-r--r--  deps/v8/src/typing.cc  26
-rw-r--r--  deps/v8/src/typing.h  9
-rw-r--r--  deps/v8/src/unbound-queue-inl.h  26
-rw-r--r--  deps/v8/src/unbound-queue.h  6
-rw-r--r--  deps/v8/src/v8-counters.h  62
-rw-r--r--  deps/v8/src/v8.cc  38
-rw-r--r--  deps/v8/src/v8.h  7
-rw-r--r--  deps/v8/src/v8globals.h  3
-rw-r--r--  deps/v8/src/v8natives.js  26
-rw-r--r--  deps/v8/src/v8utils.h  4
-rw-r--r--  deps/v8/src/version.cc  4
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h  29
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc  2
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc  502
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc  481
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc  14
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc  2
-rw-r--r--  deps/v8/src/x64/debug-x64.cc  2
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc  4
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc  2
-rw-r--r--  deps/v8/src/x64/frames-x64.cc  9
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc  220
-rw-r--r--  deps/v8/src/x64/ic-x64.cc  163
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc  496
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h  22
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc  2
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc  97
-rw-r--r--  deps/v8/src/x64/lithium-x64.h  90
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc  88
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h  12
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc  3
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc  199
-rw-r--r--  deps/v8/src/zone-inl.h  14
-rw-r--r--  deps/v8/src/zone.cc  67
-rw-r--r--  deps/v8/src/zone.h  67
293 files changed, 17874 insertions, 11591 deletions
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 20496fefde..638a25f317 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -38,6 +38,7 @@
#include "compiler.h"
#include "conversions-inl.h"
#include "counters.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
@@ -183,6 +184,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.cell_space_size = &cell_space_size;
intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity;
+ intptr_t property_cell_space_size;
+ heap_stats.property_cell_space_size = &property_cell_space_size;
+ intptr_t property_cell_space_capacity;
+ heap_stats.property_cell_space_capacity = &property_cell_space_capacity;
intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
int global_handle_count;
@@ -296,8 +301,13 @@ static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
// --- S t a t i c s ---
-static bool InitializeHelper() {
- if (i::Snapshot::Initialize()) return true;
+static bool InitializeHelper(i::Isolate* isolate) {
+ // If the isolate has a function entry hook, it needs to re-build all its
+ // code stubs with entry hooks embedded, so don't deserialize a snapshot.
+ if (isolate == NULL || isolate->function_entry_hook() == NULL) {
+ if (i::Snapshot::Initialize())
+ return true;
+ }
return i::V8::Initialize(NULL);
}
@@ -309,7 +319,7 @@ static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
if (isolate->IsInitialized()) return true;
}
ASSERT(isolate == i::Isolate::Current());
- return ApiCheck(InitializeHelper(), location, "Error initializing V8");
+ return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
}
// Some initializing API functions are called early and may be
@@ -385,6 +395,9 @@ enum CompressedStartupDataItems {
kSnapshotContext,
kLibraries,
kExperimentalLibraries,
+#if defined(ENABLE_I18N_SUPPORT)
+ kI18NExtension,
+#endif
kCompressedStartupDataCount
};
@@ -424,6 +437,17 @@ void V8::GetCompressedStartupData(StartupData* compressed_data) {
exp_libraries_source.length();
compressed_data[kExperimentalLibraries].raw_size =
i::ExperimentalNatives::GetRawScriptsSize();
+
+#if defined(ENABLE_I18N_SUPPORT)
+ i::Vector<const i::byte> i18n_extension_source =
+ i::I18NNatives::GetScriptsSource();
+ compressed_data[kI18NExtension].data =
+ reinterpret_cast<const char*>(i18n_extension_source.start());
+ compressed_data[kI18NExtension].compressed_size =
+ i18n_extension_source.length();
+ compressed_data[kI18NExtension].raw_size =
+ i::I18NNatives::GetRawScriptsSize();
+#endif
#endif
}
@@ -453,6 +477,15 @@ void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
decompressed_data[kExperimentalLibraries].data,
decompressed_data[kExperimentalLibraries].raw_size);
i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
+
+#if defined(ENABLE_I18N_SUPPORT)
+ ASSERT_EQ(i::I18NNatives::GetRawScriptsSize(),
+ decompressed_data[kI18NExtension].raw_size);
+ i::Vector<const char> i18n_extension_source(
+ decompressed_data[kI18NExtension].data,
+ decompressed_data[kI18NExtension].raw_size);
+ i::I18NNatives::SetRawScriptsSource(i18n_extension_source);
+#endif
#endif
}
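
Both hunks above extend V8's optional bzip2-compressed startup data (compiled in only with COMPRESS_STARTUP_DATA_BZ2) with a new i18n entry behind ENABLE_I18N_SUPPORT. For context, a minimal sketch of the embedder flow these functions participate in, against this release's v8.h; the actual inflate step is left as a comment because it is the embedder's own code:

    #include <v8.h>
    #include <vector>

    void PrepareStartupData() {
      int count = v8::V8::GetCompressedStartupDataCount();
      std::vector<v8::StartupData> items(count);
      v8::V8::GetCompressedStartupData(&items[0]);
      for (int i = 0; i < count; ++i) {
        // Embedder-specific step (e.g. BZ2_bzBuffToBuffDecompress): inflate
        // items[i].data (compressed_size bytes) into a raw_size-byte buffer
        // and repoint items[i].data at it.
      }
      v8::V8::SetDecompressedStartupData(&items[0]);
    }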
@@ -548,8 +581,7 @@ v8::Handle<Primitive> Undefined() {
if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
return v8::Handle<v8::Primitive>();
}
- return v8::Handle<Primitive>(ToApi<Primitive>(
- isolate->factory()->undefined_value()));
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -558,8 +590,7 @@ v8::Handle<Primitive> Null() {
if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
return v8::Handle<v8::Primitive>();
}
- return v8::Handle<Primitive>(
- ToApi<Primitive>(isolate->factory()->null_value()));
+ return ToApiHandle<Primitive>(isolate->factory()->null_value());
}
@@ -568,8 +599,7 @@ v8::Handle<Boolean> True() {
if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
return v8::Handle<Boolean>();
}
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->true_value()));
+ return ToApiHandle<Boolean>(isolate->factory()->true_value());
}
@@ -578,8 +608,7 @@ v8::Handle<Boolean> False() {
if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
return v8::Handle<Boolean>();
}
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->false_value()));
+ return ToApiHandle<Boolean>(isolate->factory()->false_value());
}
@@ -949,7 +978,7 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
result = Utils::OpenHandle(*ObjectTemplate::New());
Utils::OpenHandle(this)->set_prototype_template(*result);
}
- return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
+ return ToApiHandle<ObjectTemplate>(result);
}
@@ -961,6 +990,22 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
}
+// TODO(dcarney): Remove this abstraction when old callbacks are removed.
+class CallHandlerHelper {
+ public:
+ static inline void Set(Local<FunctionTemplate> function_template,
+ InvocationCallback callback,
+ v8::Handle<Value> data) {
+ function_template->SetCallHandlerInternal(callback, data);
+ }
+ static inline void Set(Local<FunctionTemplate> function_template,
+ FunctionCallback callback,
+ v8::Handle<Value> data) {
+ function_template->SetCallHandler(callback, data);
+ }
+};
+
+
template<typename Callback>
static Local<FunctionTemplate> FunctionTemplateNew(
Callback callback,
@@ -981,7 +1026,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
- Utils::ToLocal(obj)->SetCallHandler(callback, data);
+ CallHandlerHelper::Set(Utils::ToLocal(obj), callback, data);
}
obj->set_length(length);
obj->set_undetectable(false);
@@ -1044,8 +1089,7 @@ template<typename Operation>
static Local<Operation> NewDescriptor(
Isolate* isolate,
const i::DeclaredAccessorDescriptorData& data,
- Data* previous_descriptor
- ) {
+ Data* previous_descriptor) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::DeclaredAccessorDescriptor> previous =
i::Handle<i::DeclaredAccessorDescriptor>();
@@ -1055,8 +1099,7 @@ static Local<Operation> NewDescriptor(
}
i::Handle<i::DeclaredAccessorDescriptor> descriptor =
i::DeclaredAccessorDescriptor::Create(internal_isolate, data, previous);
- return Local<Operation>(
- reinterpret_cast<Operation*>(*Utils::ToLocal(descriptor)));
+ return Utils::Convert<i::DeclaredAccessorDescriptor, Operation>(descriptor);
}
@@ -1227,6 +1270,11 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
FunctionTemplateSetCallHandler(this, callback, data);
}
+void FunctionTemplate::SetCallHandlerInternal(InvocationCallback callback,
+ v8::Handle<Value> data) {
+ FunctionTemplateSetCallHandler(this, callback, data);
+}
+
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
v8::Handle<Value> data) {
FunctionTemplateSetCallHandler(this, callback, data);
@@ -1297,13 +1345,14 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
|| EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
return Local<ObjectTemplate>();
ENTER_V8(isolate);
- if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
+ i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this);
+ if (handle->instance_template()->IsUndefined()) {
Local<ObjectTemplate> templ =
- ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
- Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
+ ObjectTemplate::New(ToApiHandle<FunctionTemplate>(handle));
+ handle->set_instance_template(*Utils::OpenHandle(*templ));
}
- i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
- Utils::OpenHandle(this)->instance_template()));
+ i::Handle<i::ObjectTemplateInfo> result(
+ i::ObjectTemplateInfo::cast(handle->instance_template()));
return Utils::ToLocal(result);
}
@@ -1901,7 +1950,7 @@ Local<Script> Script::New(v8::Handle<String> source,
raw_result = *result;
}
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return Local<Script>(ToApi<Script>(result));
+ return ToApiHandle<Script>(result);
}
@@ -1930,7 +1979,7 @@ Local<Script> Script::Compile(v8::Handle<String> source,
isolate->factory()->NewFunctionFromSharedFunctionInfo(
function,
isolate->global_context());
- return Local<Script>(ToApi<Script>(result));
+ return ToApiHandle<Script>(result);
}
@@ -2006,6 +2055,19 @@ Local<Value> Script::Id() {
}
+int Script::GetId() {
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
+ LOG_API(isolate, "Script::Id");
+ {
+ i::HandleScope scope(isolate);
+ i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ return script->id()->value();
+ }
+}
+
+
int Script::GetLineNumber(int code_pos) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
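
The new Script::GetId() returns the id as a plain int, sparing callers the handle round-trip of the older Script::Id() above it. A minimal usage sketch, assuming an entered isolate and context on this release's API:

    v8::HandleScope scope(isolate);
    v8::Local<v8::Script> script =
        v8::Script::Compile(v8::String::New("6 * 7"));
    int id = script->GetId();  // plain int, no Handle<Value> unwrap needed
    // Functions without a backing script report v8::Script::kNoScriptId.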
@@ -2054,13 +2116,12 @@ void Script::SetData(v8::Handle<String> data) {
v8::TryCatch::TryCatch()
: isolate_(i::Isolate::Current()),
next_(isolate_->try_catch_handler_address()),
- exception_(isolate_->heap()->the_hole_value()),
- message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
rethrow_(false),
has_terminated_(false) {
+ Reset();
isolate_->RegisterTryCatchHandler(this);
}
@@ -2070,8 +2131,17 @@ v8::TryCatch::~TryCatch() {
if (rethrow_) {
v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate_));
v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
+ if (HasCaught() && capture_message_) {
+ // If an exception was caught and rethrow_ is indicated, the saved
+ // message, script, and location need to be restored to Isolate TLS
+ // for reuse. capture_message_ needs to be disabled so that DoThrow()
+ // does not create a new message.
+ isolate_->thread_local_top()->rethrowing_message_ = true;
+ isolate_->RestorePendingMessageFromTryCatch(this);
+ }
isolate_->UnregisterTryCatchHandler(this);
v8::ThrowException(exc);
+ ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
} else {
isolate_->UnregisterTryCatchHandler(this);
}
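
The new branch fixes rethrow semantics: previously ReThrow() reached DoThrow() with capture_message_ still enabled, so the originally captured message, script, and location were regenerated or lost. A sketch of the embedder pattern that now behaves as expected (this release's API):

    void CallAndPropagate(v8::Handle<v8::Function> fn,
                          v8::Handle<v8::Object> recv) {
      v8::TryCatch try_catch;
      fn->Call(recv, 0, NULL);
      if (try_catch.HasCaught()) {
        // The saved message now survives into the enclosing TryCatch
        // instead of being recreated by DoThrow().
        try_catch.ReThrow();
      }
    }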
@@ -2132,8 +2202,9 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
v8::Local<v8::Message> v8::TryCatch::Message() const {
ASSERT(isolate_ == i::Isolate::Current());
- if (HasCaught() && message_ != i::Smi::FromInt(0)) {
- i::Object* message = reinterpret_cast<i::Object*>(message_);
+ i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
+ ASSERT(message->IsJSMessageObject() || message->IsTheHole());
+ if (HasCaught() && !message->IsTheHole()) {
return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
} else {
return v8::Local<v8::Message>();
@@ -2143,8 +2214,12 @@ v8::Local<v8::Message> v8::TryCatch::Message() const {
void v8::TryCatch::Reset() {
ASSERT(isolate_ == i::Isolate::Current());
- exception_ = isolate_->heap()->the_hole_value();
- message_ = i::Smi::FromInt(0);
+ i::Object* the_hole = isolate_->heap()->the_hole_value();
+ exception_ = the_hole;
+ message_obj_ = the_hole;
+ message_script_ = the_hole;
+ message_start_pos_ = 0;
+ message_end_pos_ = 0;
}
@@ -2574,6 +2649,11 @@ bool Value::IsArrayBuffer() const {
}
+bool Value::IsArrayBufferView() const {
+ return Utils::OpenHandle(this)->IsJSArrayBufferView();
+}
+
+
bool Value::IsTypedArray() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
return false;
@@ -2607,6 +2687,11 @@ TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
+bool Value::IsDataView() const {
+ return Utils::OpenHandle(this)->IsJSDataView();
+}
+
+
bool Value::IsObject() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
@@ -2773,7 +2858,7 @@ Local<String> Value::ToString() const {
str = i::Execution::ToString(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
- return Local<String>(ToApi<String>(str));
+ return ToApiHandle<String>(str);
}
@@ -2793,7 +2878,7 @@ Local<String> Value::ToDetailString() const {
str = i::Execution::ToDetailString(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
- return Local<String>(ToApi<String>(str));
+ return ToApiHandle<String>(str);
}
@@ -2813,14 +2898,14 @@ Local<v8::Object> Value::ToObject() const {
val = i::Execution::ToObject(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
}
- return Local<v8::Object>(ToApi<Object>(val));
+ return ToApiHandle<Object>(val);
}
Local<Boolean> Value::ToBoolean() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
- return Local<Boolean>(ToApi<Boolean>(obj));
+ return ToApiHandle<Boolean>(obj);
} else {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
@@ -2830,7 +2915,7 @@ Local<Boolean> Value::ToBoolean() const {
ENTER_V8(isolate);
i::Handle<i::Object> val =
isolate->factory()->ToBoolean(obj->BooleanValue());
- return Local<Boolean>(ToApi<Boolean>(val));
+ return ToApiHandle<Boolean>(val);
}
}
@@ -2851,7 +2936,7 @@ Local<Number> Value::ToNumber() const {
num = i::Execution::ToNumber(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
}
- return Local<Number>(ToApi<Number>(num));
+ return ToApiHandle<Number>(num);
}
@@ -2869,7 +2954,15 @@ Local<Integer> Value::ToInteger() const {
num = i::Execution::ToInteger(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
}
- return Local<Integer>(ToApi<Integer>(num));
+ return ToApiHandle<Integer>(num);
+}
+
+
+void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ ApiCheck(isolate != NULL && isolate->IsInitialized() && !i::V8::IsDead(),
+ "v8::internal::Internals::CheckInitialized()",
+ "Isolate is not initialized or V8 has died");
}
@@ -2953,6 +3046,14 @@ void v8::ArrayBuffer::CheckCast(Value* that) {
}
+void v8::ArrayBufferView::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSArrayBufferView(),
+ "v8::ArrayBufferView::Cast()",
+ "Could not convert to ArrayBufferView");
+}
+
+
void v8::TypedArray::CheckCast(Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -2979,6 +3080,14 @@ TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
+void v8::DataView::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSDataView(),
+ "v8::DataView::Cast()",
+ "Could not convert to DataView");
+}
+
+
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
@@ -3099,7 +3208,7 @@ Local<Int32> Value::ToInt32() const {
num = i::Execution::ToInt32(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
}
- return Local<Int32>(ToApi<Int32>(num));
+ return ToApiHandle<Int32>(num);
}
@@ -3117,7 +3226,7 @@ Local<Uint32> Value::ToUint32() const {
num = i::Execution::ToUint32(obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
}
- return Local<Uint32>(ToApi<Uint32>(num));
+ return ToApiHandle<Uint32>(num);
}
@@ -3410,7 +3519,7 @@ bool v8::Object::SetPrototype(Handle<Value> value) {
// to propagate outside.
TryCatch try_catch;
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
+ i::Handle<i::Object> result = i::JSObject::SetPrototype(self, value_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -3866,7 +3975,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
- if (result->IsUndefined()) return v8::Local<v8::Value>();
+ if (result->IsTheHole()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -4280,6 +4389,7 @@ int Function::GetScriptColumnNumber() const {
return kLineOffsetNotFound;
}
+
Handle<Value> Function::GetScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (!func->shared()->script()->IsScript())
@@ -4288,6 +4398,15 @@ Handle<Value> Function::GetScriptId() const {
return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
}
+
+int Function::ScriptId() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return script->id()->value();
+}
+
+
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
@@ -5132,7 +5251,7 @@ bool v8::V8::Initialize() {
if (isolate != NULL && isolate->IsInitialized()) {
return true;
}
- return InitializeHelper();
+ return InitializeHelper(isolate);
}
@@ -5148,7 +5267,30 @@ void v8::V8::SetReturnAddressLocationResolver(
bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
+ return SetFunctionEntryHook(Isolate::GetCurrent(), entry_hook);
+}
+
+
+bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
+ FunctionEntryHook entry_hook) {
+ ASSERT(ext_isolate != NULL);
+ ASSERT(entry_hook != NULL);
+
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate);
+
+ // The entry hook can only be set before the Isolate is initialized, as
+ // otherwise the Isolate's code stubs generated at initialization won't
+ // contain entry hooks.
+ if (isolate->IsInitialized())
+ return false;
+
+ // Setting an entry hook is a one-way operation, once set, it cannot be
+ // changed or unset.
+ if (isolate->function_entry_hook() != NULL)
+ return false;
+
+ isolate->set_function_entry_hook(entry_hook);
+ return true;
}
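
Both preconditions are observable from the embedder: the hook must be installed on a fresh isolate before anything initializes it, and it can never be replaced. A sketch against this release's API:

    #include <v8.h>

    static void EntryHook(uintptr_t function, uintptr_t return_addr_location) {
      // Called on entry to every code object generated for this isolate;
      // keep it cheap and reentrant.
    }

    void InstallEntryHook() {
      v8::Isolate* isolate = v8::Isolate::New();
      bool ok = v8::V8::SetFunctionEntryHook(isolate, EntryHook);  // true
      // Any later call returns false: by then the isolate is either
      // initialized or already carries a hook.
      (void)ok;
    }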
@@ -5359,7 +5501,7 @@ static i::Handle<i::Context> CreateEnvironment(
return env;
}
-
+#ifdef V8_USE_UNSAFE_HANDLES
Persistent<Context> v8::Context::New(
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
@@ -5376,6 +5518,7 @@ Persistent<Context> v8::Context::New(
if (env.is_null()) return Persistent<Context>();
return Persistent<Context>::New(external_isolate, Utils::ToLocal(env));
}
+#endif
Local<Context> v8::Context::New(
@@ -6161,11 +6304,17 @@ void v8::ArrayBuffer::Neuter() {
LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
ENTER_V8(isolate);
- for (i::Handle<i::Object> array_obj(obj->weak_first_array(), isolate);
- *array_obj != i::Smi::FromInt(0);) {
- i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*array_obj));
- typed_array->Neuter();
- array_obj = i::handle(typed_array->weak_next(), isolate);
+ for (i::Handle<i::Object> view_obj(obj->weak_first_view(), isolate);
+ !view_obj->IsUndefined();) {
+ i::Handle<i::JSArrayBufferView> view(i::JSArrayBufferView::cast(*view_obj));
+ if (view->IsJSTypedArray()) {
+ i::JSTypedArray::cast(*view)->Neuter();
+ } else if (view->IsJSDataView()) {
+ i::JSDataView::cast(*view)->Neuter();
+ } else {
+ UNREACHABLE();
+ }
+ view_obj = i::handle(view->weak_next(), isolate);
}
obj->Neuter();
}
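
Neuter() now walks the buffer's weak list of views (weak_first_view / weak_next) instead of a typed-array-only list, so DataViews are detached as well. A sketch, using an externalized buffer since only those may be neutered through the API:

    uint8_t backing[64] = {0};
    v8::Local<v8::ArrayBuffer> buffer =
        v8::ArrayBuffer::New(backing, sizeof(backing));
    v8::Local<v8::Uint8Array> bytes = v8::Uint8Array::New(buffer, 0, 64);
    v8::Local<v8::DataView> view = v8::DataView::New(buffer, 0, 64);
    buffer->Neuter();
    // Both views sat on the weak list and are now detached:
    // bytes->ByteLength() == 0 and view->ByteLength() == 0.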
@@ -6203,33 +6352,35 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
}
-Local<ArrayBuffer> v8::TypedArray::Buffer() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::TypedArray::Buffer()"))
- return Local<ArrayBuffer>();
- i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
ASSERT(obj->buffer()->IsJSArrayBuffer());
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
return Utils::ToLocal(buffer);
}
-size_t v8::TypedArray::ByteOffset() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::TypedArray::ByteOffset()")) return 0;
- i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+size_t v8::ArrayBufferView::ByteOffset() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_offset()->Number());
}
-size_t v8::TypedArray::ByteLength() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::TypedArray::ByteLength()")) return 0;
- i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+size_t v8::ArrayBufferView::ByteLength() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
}
+void* v8::ArrayBufferView::BaseAddress() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ void* buffer_data = buffer->backing_store();
+ size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
+ return static_cast<uint8_t*>(buffer_data) + byte_offset;
+}
+
+
size_t v8::TypedArray::Length() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
@@ -6238,16 +6389,28 @@ size_t v8::TypedArray::Length() {
}
-void* v8::TypedArray::BaseAddress() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::TypedArray::BaseAddress()")) return NULL;
- i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
- void* buffer_data = buffer->backing_store();
- size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
- return static_cast<uint8_t*>(buffer_data) + byte_offset;
-}
+static inline void SetupArrayBufferView(
+ i::Isolate* isolate,
+ i::Handle<i::JSArrayBufferView> obj,
+ i::Handle<i::JSArrayBuffer> buffer,
+ size_t byte_offset,
+ size_t byte_length) {
+ ASSERT(byte_offset + byte_length <=
+ static_cast<size_t>(buffer->byte_length()->Number()));
+
+ obj->set_buffer(*buffer);
+
+ obj->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*obj);
+
+ i::Handle<i::Object> byte_offset_object =
+ isolate->factory()->NewNumberFromSize(byte_offset);
+ obj->set_byte_offset(*byte_offset_object);
+ i::Handle<i::Object> byte_length_object =
+ isolate->factory()->NewNumberFromSize(byte_length);
+ obj->set_byte_length(*byte_length_object);
+}
template<typename ElementType,
ExternalArrayType array_type,
@@ -6260,24 +6423,12 @@ i::Handle<i::JSTypedArray> NewTypedArray(
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
ASSERT(byte_offset % sizeof(ElementType) == 0);
- ASSERT(byte_offset + length * sizeof(ElementType) <=
- static_cast<size_t>(buffer->byte_length()->Number()));
- obj->set_buffer(*buffer);
+ SetupArrayBufferView(
+ isolate, obj, buffer, byte_offset, length * sizeof(ElementType));
- obj->set_weak_next(buffer->weak_first_array());
- buffer->set_weak_first_array(*obj);
-
- i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
- static_cast<double>(byte_offset));
- obj->set_byte_offset(*byte_offset_object);
-
- i::Handle<i::Object> byte_length_object = isolate->factory()->NewNumber(
- static_cast<double>(length * sizeof(ElementType)));
- obj->set_byte_length(*byte_length_object);
-
- i::Handle<i::Object> length_object = isolate->factory()->NewNumber(
- static_cast<double>(length));
+ i::Handle<i::Object> length_object =
+ isolate->factory()->NewNumberFromSize(length);
obj->set_length(*length_object);
i::Handle<i::ExternalArray> elements =
@@ -6326,6 +6477,20 @@ TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
#undef TYPED_ARRAY_NEW
+Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t byte_length) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(
+ isolate, "v8::DataView::New(void*, size_t, size_t)");
+ LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)");
+ ENTER_V8(isolate);
+ i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView();
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+ SetupArrayBufferView(
+ isolate, obj, buffer, byte_offset, byte_length);
+ return Utils::ToLocal(obj);
+}
+
Local<Symbol> v8::Symbol::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
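
DataView::New is the entry point for the new view type; its offset and length are checked by SetupArrayBufferView's assertion above. A short usage sketch (this release's API), showing the accessors DataView inherits from ArrayBufferView:

    v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(16);
    v8::Local<v8::DataView> view = v8::DataView::New(buffer, 4, 8);
    // Inherited ArrayBufferView accessors:
    //   view->Buffer()      returns the backing ArrayBuffer
    //   view->ByteOffset()  == 4
    //   view->ByteLength()  == 8
    //   view->BaseAddress() == backing store + 4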
@@ -6538,10 +6703,11 @@ CpuProfiler* Isolate::GetCpuProfiler() {
v8::Local<v8::Context> Isolate::GetCurrentContext() {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Handle<i::Object> current = internal_isolate->native_context();
- if (current.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
- return Utils::ToLocal(context);
+ i::Context* context = internal_isolate->context();
+ if (context == NULL) return Local<Context>();
+ i::Context* native_context = context->global_object()->native_context();
+ if (native_context == NULL) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>(native_context));
}
@@ -6549,24 +6715,27 @@ void Isolate::SetObjectGroupId(const Persistent<Value>& object,
UniqueId id) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetObjectGroupId(
- reinterpret_cast<i::Object**>(*object), id);
+ Utils::OpenPersistent(object).location(),
+ id);
}
void Isolate::SetReferenceFromGroup(UniqueId id,
const Persistent<Value>& object) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- internal_isolate->global_handles()
- ->SetReferenceFromGroup(id, reinterpret_cast<i::Object**>(*object));
+ internal_isolate->global_handles()->SetReferenceFromGroup(
+ id,
+ Utils::OpenPersistent(object).location());
}
void Isolate::SetReference(const Persistent<Object>& parent,
const Persistent<Value>& child) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Object** parent_location = Utils::OpenPersistent(parent).location();
internal_isolate->global_handles()->SetReference(
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
- reinterpret_cast<i::Object**>(*child));
+ reinterpret_cast<i::HeapObject**>(parent_location),
+ Utils::OpenPersistent(child).location());
}
@@ -7171,22 +7340,31 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
- return Handle<String>(ToApi<String>(
- isolate->factory()->InternalizeUtf8String(entry->name())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(entry->name()));
} else {
- return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
+ return ToApiHandle<String>(isolate->factory()->NewConsString(
isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
- isolate->factory()->InternalizeUtf8String(entry->name()))));
+ isolate->factory()->InternalizeUtf8String(entry->name())));
}
}
+int CpuProfileNode::GetScriptId() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptId");
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ const i::CodeEntry* entry = node->entry();
+ return entry->script_id();
+}
+
+
Handle<String> CpuProfileNode::GetScriptResourceName() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- node->entry()->resource_name())));
+ return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
+ node->entry()->resource_name()));
}
@@ -7277,8 +7455,8 @@ Handle<String> CpuProfile::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- profile->title())));
+ return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
+ profile->title()));
}
@@ -7301,33 +7479,11 @@ int CpuProfile::GetSamplesCount() const {
}
-int CpuProfiler::GetProfilesCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- ASSERT(profiler != NULL);
- return profiler->GetProfilesCount();
-}
-
-
int CpuProfiler::GetProfileCount() {
return reinterpret_cast<i::CpuProfiler*>(this)->GetProfilesCount();
}
-const CpuProfile* CpuProfiler::GetProfile(int index,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- ASSERT(profiler != NULL);
- return reinterpret_cast<const CpuProfile*>(
- profiler->GetProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- index));
-}
-
-
const CpuProfile* CpuProfiler::GetCpuProfile(int index,
Handle<Value> security_token) {
return reinterpret_cast<const CpuProfile*>(
@@ -7343,19 +7499,6 @@ const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
}
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- ASSERT(profiler != NULL);
- return reinterpret_cast<const CpuProfile*>(
- profiler->FindProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- uid));
-}
-
-
const CpuProfile* CpuProfiler::FindCpuProfile(unsigned uid,
Handle<Value> security_token) {
return reinterpret_cast<const CpuProfile*>(
@@ -7365,34 +7508,12 @@ const CpuProfile* CpuProfiler::FindCpuProfile(unsigned uid,
}
-void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- ASSERT(profiler != NULL);
- profiler->StartProfiling(*Utils::OpenHandle(*title), record_samples);
-}
-
-
void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), record_samples);
}
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- ASSERT(profiler != NULL);
- return reinterpret_cast<const CpuProfile*>(
- profiler->StopProfiling(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- *Utils::OpenHandle(*title)));
-}
-
-
const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
Handle<Value> security_token) {
return reinterpret_cast<const CpuProfile*>(
@@ -7410,15 +7531,6 @@ const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
}
-void CpuProfiler::DeleteAllProfiles() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- ASSERT(profiler != NULL);
- profiler->DeleteAllProfiles();
-}
-
-
void CpuProfiler::DeleteAllCpuProfiles() {
reinterpret_cast<i::CpuProfiler*>(this)->DeleteAllProfiles();
}
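
Each removed static method has an instance counterpart reached through Isolate::GetCpuProfiler(), visible in the retained bodies above. A migration sketch against this release's API:

    #include <v8.h>

    void ProfileRun(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      v8::Local<v8::String> title = v8::String::New("run");
      profiler->StartCpuProfiling(title, true);  // record_samples
      // ... execute the code of interest ...
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
      if (profile != NULL) {
        // CpuProfileNode::GetScriptId(), added earlier in this diff, is
        // available on the nodes of this tree.
        const v8::CpuProfileNode* root = profile->GetTopDownRoot();
        (void)root;
      }
      profiler->DeleteAllCpuProfiles();
    }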
@@ -7446,12 +7558,12 @@ Handle<Value> HeapGraphEdge::GetName() const {
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(
- isolate->factory()->InternalizeUtf8String(edge->name())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(edge->name()));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(
- isolate->factory()->NewNumberFromInt(edge->index())));
+ return ToApiHandle<Number>(
+ isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
}
return v8::Undefined();
@@ -7490,8 +7602,8 @@ HeapGraphNode::Type HeapGraphNode::GetType() const {
Handle<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- ToInternal(this)->name())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
@@ -7528,9 +7640,9 @@ v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
- return v8::Handle<Value>(!object.is_null() ?
- ToApi<Value>(object) : ToApi<Value>(
- isolate->factory()->undefined_value()));
+ return !object.is_null() ?
+ ToApiHandle<Value>(object) :
+ ToApiHandle<Value>(isolate->factory()->undefined_value());
}
@@ -7552,13 +7664,6 @@ void HeapSnapshot::Delete() {
}
-HeapSnapshot::Type HeapSnapshot::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
- return kFull;
-}
-
-
unsigned HeapSnapshot::GetUid() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
@@ -7569,8 +7674,8 @@ unsigned HeapSnapshot::GetUid() const {
Handle<String> HeapSnapshot::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- ToInternal(this)->title())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(ToInternal(this)->title()));
}
@@ -7629,72 +7734,23 @@ void HeapSnapshot::Serialize(OutputStream* stream,
}
-int HeapProfiler::GetSnapshotsCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
- return isolate->heap_profiler()->GetSnapshotsCount();
-}
-
-
int HeapProfiler::GetSnapshotCount() {
return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
}
-const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- isolate->heap_profiler()->GetSnapshot(index));
-}
-
-
const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
return reinterpret_cast<const HeapSnapshot*>(
reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshot(index));
}
-const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- isolate->heap_profiler()->FindSnapshot(uid));
-}
-
-
-const HeapSnapshot* HeapProfiler::FindHeapSnapshot(unsigned uid) {
- return reinterpret_cast<const HeapSnapshot*>(
- reinterpret_cast<i::HeapProfiler*>(this)->FindSnapshot(uid));
-}
-
-
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- return isolate->heap_profiler()->GetSnapshotObjectId(obj);
-}
-
-
SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
}
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
- HeapSnapshot::Type type,
- ActivityControl* control,
- ObjectNameResolver* resolver) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- isolate->heap_profiler()->TakeSnapshot(
- *Utils::OpenHandle(*title), control, resolver));
-}
-
-
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
Handle<String> title,
ActivityControl* control,
@@ -7705,61 +7761,26 @@ const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
}
-void HeapProfiler::StartHeapObjectsTracking() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
- isolate->heap_profiler()->StartHeapObjectsTracking();
-}
-
-
void HeapProfiler::StartTrackingHeapObjects() {
reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking();
}
-void HeapProfiler::StopHeapObjectsTracking() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
- isolate->heap_profiler()->StopHeapObjectsTracking();
-}
-
-
void HeapProfiler::StopTrackingHeapObjects() {
reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
}
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
- return isolate->heap_profiler()->PushHeapObjectsStats(stream);
-}
-
-
SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream) {
return reinterpret_cast<i::HeapProfiler*>(this)->PushHeapObjectsStats(stream);
}
-void HeapProfiler::DeleteAllSnapshots() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
- isolate->heap_profiler()->DeleteAllSnapshots();
-}
-
-
void HeapProfiler::DeleteAllHeapSnapshots() {
reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
}
-void HeapProfiler::DefineWrapperClass(uint16_t class_id,
- WrapperInfoCallback callback) {
- i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
- callback);
-}
-
-
void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
WrapperInfoCallback callback) {
reinterpret_cast<i::HeapProfiler*>(this)->DefineWrapperClass(class_id,
@@ -7767,17 +7788,6 @@ void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
}
-int HeapProfiler::GetPersistentHandleCount() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->global_handles()->NumberOfGlobalHandles();
-}
-
-
-size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- return i::Isolate::Current()->heap_profiler()->GetMemorySizeUsedByProfiler();
-}
-
-
size_t HeapProfiler::GetProfilerMemorySize() {
return reinterpret_cast<i::HeapProfiler*>(this)->
GetMemorySizeUsedByProfiler();
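
The heap profiler gets the same treatment: the static, Isolate::Current()-based entry points are removed in favor of the instance API on Isolate::GetHeapProfiler(). A sketch under this release's API:

    #include <v8.h>

    void SnapshotHeap(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      const v8::HeapSnapshot* snapshot =
          profiler->TakeHeapSnapshot(v8::String::New("before-gc"));
      // GetUid(), GetTitle() and Serialize() behave as before.
      (void)snapshot;
      profiler->DeleteAllHeapSnapshots();
    }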
@@ -8002,4 +8012,55 @@ void DeferredHandles::Iterate(ObjectVisitor* v) {
}
+v8::Handle<v8::Value> InvokeAccessorGetter(
+ v8::Local<v8::String> property,
+ const v8::AccessorInfo& info,
+ v8::AccessorGetter getter) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
+ getter));
+ // Leaving JavaScript.
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, getter_address);
+ return getter(property, info);
+}
+
+
+void InvokeAccessorGetterCallback(
+ v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info,
+ v8::AccessorGetterCallback getter) {
+ // Leaving JavaScript.
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
+ getter));
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, getter_address);
+ return getter(property, info);
+}
+
+
+v8::Handle<v8::Value> InvokeInvocationCallback(
+ const v8::Arguments& args,
+ v8::InvocationCallback callback) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ Address callback_address =
+ reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, callback_address);
+ return callback(args);
+}
+
+
+void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ v8::FunctionCallback callback) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ Address callback_address =
+ reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, callback_address);
+ return callback(info);
+}
+
+
} } // namespace v8::internal
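
The four Invoke* helpers above wrap embedder callbacks reached from generated inline caches in VMState<EXTERNAL> plus an ExternalCallbackScope, so the CPU profiler can attribute samples to the callback's address. Nothing changes on the embedder side; a new-style accessor such as the sketch below is now simply dispatched through InvokeAccessorGetterCallback:

    static void LengthGetter(v8::Local<v8::String> property,
                             const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Time spent here is now logged as EXTERNAL and shows up against
      // this function's address in CPU profiles.
      info.GetReturnValue().Set(v8::Integer::New(42));
    }

    // Registered as usual, e.g.:
    //   templ->SetAccessor(v8::String::New("length"), LengthGetter);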
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 3c141f7097..0f33bc815f 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -170,6 +170,7 @@ class RegisteredExtension {
V(Object, JSObject) \
V(Array, JSArray) \
V(ArrayBuffer, JSArrayBuffer) \
+ V(ArrayBufferView, JSArrayBufferView) \
V(TypedArray, JSTypedArray) \
V(Uint8Array, JSTypedArray) \
V(Uint8ClampedArray, JSTypedArray) \
@@ -180,6 +181,7 @@ class RegisteredExtension {
V(Int32Array, JSTypedArray) \
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
+ V(DataView, JSDataView) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, Object) \
@@ -217,6 +219,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
+ static inline Local<ArrayBufferView> ToLocal(
+ v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
+ static inline Local<DataView> ToLocal(
+ v8::internal::Handle<v8::internal::JSDataView> obj);
static inline Local<TypedArray> ToLocal(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
@@ -273,13 +279,31 @@ class Utils {
OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
-};
+ template<class From, class To>
+ static inline Local<To> Convert(v8::internal::Handle<From> obj) {
+ ASSERT(obj.is_null() || !obj->IsTheHole());
+ return Local<To>(reinterpret_cast<To*>(obj.location()));
+ }
-template <class T>
-inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
- return reinterpret_cast<T*>(obj.location());
-}
+ template <class T>
+ static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
+ const v8::Persistent<T>& persistent) {
+ return v8::internal::Handle<v8::internal::Object>(
+ reinterpret_cast<v8::internal::Object**>(persistent.val_));
+ }
+
+ template <class T>
+ static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
+ v8::Persistent<T>* persistent) {
+ return OpenPersistent(*persistent);
+ }
+
+ template <class From, class To>
+ static inline v8::internal::Handle<To> OpenHandle(v8::Local<From> handle) {
+ return OpenHandle(*handle);
+ }
+};
template <class T>
@@ -293,31 +317,31 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
}
-class InternalHandleHelper {
- public:
- template<class From, class To>
- static inline Local<To> Convert(v8::internal::Handle<From> obj) {
- return Local<To>(reinterpret_cast<To*>(obj.location()));
- }
-};
+template <class T>
+inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+ return reinterpret_cast<T*>(obj.location());
+}
+
+template <class T>
+inline v8::Local<T> ToApiHandle(
+ v8::internal::Handle<v8::internal::Object> obj) {
+ return Utils::Convert<v8::internal::Object, T>(obj);
+}
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \
Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
- ASSERT(obj.is_null() || !obj->IsTheHole()); \
- return InternalHandleHelper::Convert<v8::internal::From, v8::To>(obj); \
+ return Convert<v8::internal::From, v8::To>(obj); \
}
#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
- ASSERT(obj.is_null() || !obj->IsTheHole()); \
ASSERT(obj->type() == typeConst); \
- return InternalHandleHelper:: \
- Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
+ return Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
}
@@ -330,6 +354,8 @@ MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
+MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
@@ -662,6 +688,24 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
}
+// Interceptor functions called from generated inline caches to notify the
+// CPU profiler that external callbacks are invoked.
+v8::Handle<v8::Value> InvokeAccessorGetter(
+ v8::Local<v8::String> property,
+ const v8::AccessorInfo& info,
+ v8::AccessorGetter getter);
+
+
+void InvokeAccessorGetterCallback(
+ v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info,
+ v8::AccessorGetterCallback getter);
+
+v8::Handle<v8::Value> InvokeInvocationCallback(const v8::Arguments& args,
+ v8::InvocationCallback callback);
+void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ v8::FunctionCallback callback);
+
class Testing {
public:
static v8::Testing::StressType stress_type() { return stress_type_; }
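
The Convert, ToApiHandle and OpenHandle helpers declared above pair up as inverses between internal and API handles. A usage sketch (outside the patch; `factory` is a hypothetical v8::internal::Factory*, and `i` abbreviates v8::internal):

    // Internal -> API -> internal round trip (illustrative only).
    i::Handle<i::Object> num = factory->NewNumber(1.0);
    v8::Local<v8::Number> api = ToApiHandle<v8::Number>(num);
    i::Handle<i::Object> raw = v8::Utils::OpenHandle(*api);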
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index 091d0b92a4..11d9279e81 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -28,6 +28,8 @@
#include "v8.h"
#include "arguments.h"
+#include "vm-state-inl.h"
+
namespace v8 {
namespace internal {
@@ -82,7 +84,7 @@ v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
Object** handle = &this->end()[kReturnValueOffset];
  // Nothing was set; return an empty handle, as per previous behaviour.
if ((*handle)->IsTheHole()) return v8::Handle<V>();
- return v8::Handle<V>(reinterpret_cast<V*>(handle));
+ return Utils::Convert<Object, V>(Handle<Object>(handle));
}
@@ -90,6 +92,8 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(InvocationCallback f) {
Isolate* isolate = this->isolate();
void* f_as_void = CallbackTable::FunctionToVoidPtr(f);
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
if (new_style) {
FunctionCallback c = reinterpret_cast<FunctionCallback>(f);
FunctionCallbackInfo<v8::Value> info(end(),
@@ -114,6 +118,8 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f) { \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
@@ -132,6 +138,8 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
@@ -151,6 +159,8 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
@@ -170,6 +180,8 @@ void PropertyCallbackArguments::Call(OldFunction f, \
Isolate* isolate = this->isolate(); \
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
if (new_style) { \
NewFunction c = reinterpret_cast<NewFunction>(f); \
PropertyCallbackInfo<ReturnValue> info(end()); \
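
Every wrapper generated by these macros now opens the scope with FUNCTION_ADDR(f). That macro lives in V8's globals header and is roughly:

    // Roughly how FUNCTION_ADDR reduces a function pointer to an Address the
    // profiler can record (sketch; see v8globals.h).
    #define FUNCTION_ADDR(f) \
      (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))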
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 1e0d5c1f7d..bfe9bc8335 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -149,6 +149,7 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -179,24 +180,22 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -286,8 +285,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
@@ -314,8 +313,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
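
The JSGlobalPropertyCell-to-Cell rename keeps the same addressing trick: the reloc slot stores the address of the cell's value field, not the cell itself. The two directions are inverses, roughly (a sketch; the real Cell::FromValueAddress lives in objects-inl.h):

    // Store side (set_target_cell):
    Address value_slot = cell->address() + Cell::kValueOffset;
    // Load side (target_cell) undoes it and re-tags the heap pointer:
    Cell* loaded = reinterpret_cast<Cell*>(
        value_slot - Cell::kValueOffset + kHeapObjectTag);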
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index c6ea6006fe..89c0a3b3cd 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -36,7 +36,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#include "serialize.h"
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 4d7bc8ef2f..69ba00ac5c 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "debug.h"
@@ -104,360 +104,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand::Zero());
- __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ add(scratch1, result, Operand(JSArray::kSize));
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- }
- } else {
- Label loop, entry;
- __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ b(&entry);
- __ bind(&loop);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(scratch1, scratch2);
- __ b(lt, &loop);
- }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ tst(array_size, array_size);
- __ Assert(ne, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- __ mov(elements_array_end,
- Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end, elements_array_end, Operand::SmiUntag(array_size));
- __ Allocate(elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ add(elements_array_storage, result, Operand(JSArray::kSize));
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ sub(elements_array_storage,
- elements_array_storage,
- Operand(kHeapObjectTag));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(array_size,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- __ add(elements_array_end,
- elements_array_storage,
- Operand::PointerOffsetFromSmiKey(array_size));
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ jmp(&entry);
- __ bind(&loop);
- __ str(scratch1,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(elements_array_storage, elements_array_end);
- __ b(lt, &loop);
- }
-}
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// r0: argc
-// r1: constructor (built-in Array function)
-// lr: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ cmp(r0, Operand::Zero());
- __ b(ne, &argc_one_or_more);
-
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Set up return value, remove receiver from stack and return.
- __ mov(r0, r2);
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(r0, Operand(1));
- __ b(ne, &argc_two_or_more);
- __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
- __ tst(r2, r2);
- __ b(ne, &not_empty_array);
- __ Drop(1); // Adjust stack.
- __ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero.
- __ b(&empty_array);
-
- __ bind(&not_empty_array);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
- __ b(ne, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
- __ b(ge, call_generic_code);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(r0, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Jump(lr);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ SmiTag(r2, r0);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store therefore PreIndex is
- // used when filling the backing store.
- // r0: argc
- // r3: JSArray
- // r4: elements_array storage start (untagged)
- // r5: elements_array_end (untagged)
- // sp[0]: last argument
- Label loop, entry;
- __ mov(r7, sp);
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r2, &has_non_smi_element);
- }
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(r4, r5);
- __ b(lt, &loop);
-
- __ bind(&finish);
- __ mov(sp, r7);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // r0: argc
- // r3: JSArray
- // sp[0]: receiver
- __ add(sp, sp, Operand(kPointerSize));
- __ mov(r0, r3);
- __ Jump(lr);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(r3, r4);
- __ b(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // r3: JSArray
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r2,
- r9,
- &cant_transition_map);
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ RecordWriteField(r3,
- HeapObject::kMapOffset,
- r2,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ sub(r7, r7, Operand(kPointerSize));
- __ bind(&loop2);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ cmp(r4, r5);
- __ b(lt, &loop2);
- __ b(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -480,20 +126,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- if (FLAG_optimize_constructed_arrays) {
- // tail call a stub
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
- }
+  // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -518,56 +153,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- if (FLAG_optimize_constructed_arrays) {
- // tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r2 : type info cell
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(r3);
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ mov(r2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
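
With FLAG_optimize_constructed_arrays retired, both array builtins unconditionally tail-call their constructor stubs. TailCallStub amounts to a jump rather than a call, so the stub returns straight to the builtin's caller; roughly (a sketch of the ARM macro-assembler helper):

    // No link-register update: the stub's return goes to the builtin's
    // own caller.
    void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
      Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
    }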
@@ -1125,6 +717,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: argc
// r4: argv
// r5-r7, cp may be clobbered
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
__ mov(cp, Operand::Zero());
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index b26bf7ede2..6af5ccea38 100644..100755
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "bootstrapper.h"
#include "code-stubs.h"
@@ -892,12 +892,17 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Now that we have the types we might as well check for
// internalized-internalized.
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+ Label not_internalized;
STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsInternalizedMask));
- __ b(ne, &return_not_equal);
+ __ and_(r2, r2, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ cmp(r2, Operand(kInternalizedTag | kStringTag));
+ __ b(ne, &not_internalized); // r2 (rhs) is not an internalized string
+
+ __ and_(r3, r3, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ cmp(r3, Operand(kInternalizedTag | kStringTag));
+ __ b(eq, &return_not_equal); // both rhs and lhs are internalized strings
+
+ __ bind(&not_internalized);
}
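
The old shortcut AND-ed the two type bytes together and leaned on the deleted STATIC_ASSERT that no non-string carries the internalized bit; the new sequence checks each operand on its own. As a C predicate over the instance-type byte it reads (constants from objects.h, where kStringTag is zero):

    static bool IsInternalizedStringType(uint32_t instance_type) {
      return (instance_type & (kIsNotStringMask | kIsInternalizedMask)) ==
             (kInternalizedTag | kStringTag);
    }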
@@ -937,7 +942,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- // Ensure that no non-strings have the internalized bit set.
Label object_test;
STATIC_ASSERT(kInternalizedTag != 0);
__ tst(r2, Operand(kIsNotStringMask));
@@ -2075,7 +2079,7 @@ void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label right_arg_changed, call_runtime;
- if (op_ == Token::MOD && has_fixed_right_arg_) {
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
// It is guaranteed that the value will fit into a Smi, because if it
// didn't, we wouldn't be here, see BinaryOp_Patch.
__ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
@@ -2252,7 +2256,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// to type transition.
} else {
- if (has_fixed_right_arg_) {
+ if (encoded_right_arg_.has_value) {
__ Vmov(d8, fixed_right_arg_value(), scratch1);
__ VFPCompareAndSetFlags(d1, d8);
__ b(ne, &transition);
@@ -2996,9 +3000,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
- if (FLAG_optimize_constructed_arrays) {
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
- }
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -3075,7 +3077,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(r0, Operand(r4));
__ mov(r1, Operand(r6));
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (FLAG_debug_code) {
@@ -3179,6 +3181,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Result returned in r0 or r0+r1 by default.
// NOTE: Invocations of builtins may return failure objects
@@ -3269,6 +3273,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
// Save callee-saved registers (incl. cp and fp), sp, and lr
@@ -3504,7 +3510,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
__ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -4623,52 +4629,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r1 : the function to call
- // r2 : cache cell for call target
- Label done;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(r3, r1);
- __ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
-
- // An uninitialized cache is patched with the function.
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
- ASSERT(FLAG_optimize_constructed_arrays);
Label initialize, done, miss, megamorphic, not_array_function;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -4677,7 +4643,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
masm->isolate()->heap()->the_hole_value());
// Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -4689,12 +4655,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Special handling of the Array() function, which caches not only the
// monomorphic Array function but the initial ElementsKind with special
// sentinels
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(r3, &miss);
- __ cmp(r3, Operand(terminal_kind_sentinel));
- __ b(gt, &miss);
+ if (FLAG_debug_code) {
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ cmp(r3, Operand(terminal_kind_sentinel));
+ __ Assert(le, "Array function sentinel is not an ElementsKind");
+ }
+
// Make sure the function is the Array() function
__ LoadArrayFunction(r3);
__ cmp(r1, r3);
@@ -4711,7 +4680,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// write-barrier is needed.
__ bind(&megamorphic);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
@@ -4729,11 +4698,11 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
GetInitialFastElementsKind());
__ mov(r3, Operand(initial_kind_sentinel));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ b(&done);
__ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
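
For orientation, the feedback cell's value walks a small state machine; in outline (reconstructed from the code above):

    // the_hole   uninitialized; the first call stores the callee, or, for
    //            the Array() function, a smi ElementsKind sentinel
    // function   monomorphic; a different callee degrades it to megamorphic
    // smi        Array elements-kind sentinel, kept while the callee is Array()
    // undefined  megamorphic; terminal, never overwritten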
@@ -4772,11 +4741,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Fast-case: Invoke the function now.
@@ -4809,7 +4774,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
}
// Check for function proxy.
__ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
@@ -4851,15 +4816,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
+ Register jmp_reg = r3;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -6263,9 +6224,14 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp1, Operand(tmp2));
- __ tst(tmp1, Operand(kIsInternalizedMask));
- __ b(eq, &miss);
+
+ __ and_(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ cmp(tmp1, Operand(kInternalizedTag | kStringTag));
+ __ b(ne, &miss);
+
+ __ and_(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ cmp(tmp2, Operand(kInternalizedTag | kStringTag));
+ __ b(ne, &miss);
// Internalized strings are compared by identity.
__ cmp(left, right);
@@ -6304,19 +6270,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- Label succeed1;
- __ tst(tmp1, Operand(kIsInternalizedMask));
- __ b(ne, &succeed1);
- __ cmp(tmp1, Operand(SYMBOL_TYPE));
- __ b(ne, &miss);
- __ bind(&succeed1);
-
- Label succeed2;
- __ tst(tmp2, Operand(kIsInternalizedMask));
- __ b(ne, &succeed2);
- __ cmp(tmp2, Operand(SYMBOL_TYPE));
- __ b(ne, &miss);
- __ bind(&succeed2);
+ __ JumpIfNotUniqueName(tmp1, &miss);
+ __ JumpIfNotUniqueName(tmp2, &miss);
// Unique names are compared by identity.
__ cmp(left, right);
@@ -6371,7 +6326,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
// Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
+ // because we already know they are not identical. We know they are both
+ // strings.
if (equality) {
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag != 0);
@@ -6482,13 +6438,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ mov(r2, Operand(function));
- GenerateCall(masm, r2);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t code =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
@@ -6564,11 +6513,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsInternalizedMask));
- __ b(ne, &good);
- __ cmp(entity_name, Operand(SYMBOL_TYPE));
- __ b(ne, miss);
-
+ __ JumpIfNotUniqueName(entity_name, miss);
__ bind(&good);
// Restore the properties.
@@ -6735,15 +6680,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
- Label cont;
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ tst(entry_key, Operand(kIsInternalizedMask));
- __ b(ne, &cont);
- __ cmp(entry_key, Operand(SYMBOL_TYPE));
- __ b(ne, &maybe_in_dictionary);
- __ bind(&cont);
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
}
}
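
JumpIfNotUniqueName folds the four-instruction pattern deleted here and at the two call sites above. Reconstructed from that pattern, the helper would look roughly like the following (the shipped version may test the internalized bit differently):

    void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                             Label* not_unique_name) {
      Label succeed;
      tst(reg, Operand(kIsInternalizedMask));  // internalized string is unique
      b(ne, &succeed);
      cmp(reg, Operand(SYMBOL_TYPE));          // otherwise it must be a Symbol
      b(ne, not_unique_name);
      bind(&succeed);
    }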
@@ -7062,10 +7002,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : element value to store
- // -- r1 : array literal
- // -- r2 : map of array literal
// -- r3 : element index as smi
- // -- r4 : array literal index in function as smi
+ // -- sp[0] : array literal index in function as smi
+ // -- sp[4] : array literal
+ // clobbers r1, r2, r4
// -----------------------------------
Label element_done;
@@ -7074,6 +7014,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+
__ CheckFastElements(r2, r5, &double_elements);
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
@@ -7133,8 +7078,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
+ AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
@@ -7148,9 +7094,21 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
const int32_t kReturnAddressDistanceFromFunctionStart =
3 * Assembler::kInstrSize;
- // Save live volatile registers.
- __ Push(lr, r5, r1);
- const int32_t kNumSavedRegs = 3;
+ // This should contain all kCallerSaved registers.
+ const RegList kSavedRegs =
+ 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 5 | // r5
+ 1 << 9; // r9
+ // We also save lr, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = 7;
+
+ ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ stm(db_w, sp, kSavedRegs | lr.bit());
// Compute the function's address for the first argument.
__ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
@@ -7167,15 +7125,14 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ and_(sp, sp, Operand(-frame_alignment));
}
-#if defined(V8_HOST_ARCH_ARM)
- __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ ldr(ip, MemOperand(ip));
+#if V8_HOST_ARCH_ARM
+ int32_t entry_hook =
+ reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
+ __ mov(ip, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
masm->isolate())));
@@ -7187,8 +7144,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ mov(sp, r5);
}
- __ Pop(lr, r5, r1);
- __ Ret();
+ // Also pop pc to get Ret(0).
+ __ ldm(ia_w, sp, kSavedRegs | pc.bit());
}
@@ -7244,6 +7201,10 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &normal_sequence);
+  // The type cell may have gone megamorphic; don't overwrite it if so.
+ __ ldr(r5, FieldMemOperand(r2, kPointerSize));
+ __ JumpIfNotSmi(r5, &normal_sequence);
+
// Save the resulting elements kind in type info
__ SmiTag(r3);
__ str(r3, FieldMemOperand(r2, kPointerSize));
@@ -7276,7 +7237,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, true);
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7332,64 +7293,50 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
- // We should either have undefined in ebx or a valid jsglobalpropertycell
+    // We should either have undefined in r2 or a valid cell
Label okay_here;
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &okay_here);
__ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(global_property_cell_map));
+ __ cmp(r3, Operand(cell_map));
__ Assert(eq, "Expected property cell in register ebx");
__ bind(&okay_here);
}
- if (FLAG_optimize_constructed_arrays) {
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
- __ cmp(r2, Operand(undefined_sentinel));
- __ b(eq, &no_info);
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ JumpIfNotSmi(r3, &no_info);
- __ SmiUntag(r3);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ mov(r3, Operand(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ Label no_info, switch_ready;
+ // Get the elements kind and case on that.
+ __ cmp(r2, Operand(undefined_sentinel));
+ __ b(eq, &no_info);
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ __ JumpIfNotSmi(r3, &no_info);
+ __ SmiUntag(r3);
+ __ jmp(&switch_ready);
+ __ bind(&no_info);
+ __ mov(r3, Operand(GetInitialFastElementsKind()));
+ __ bind(&switch_ready);
+
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ CreateArrayDispatchOneArgument(masm);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ UNREACHABLE();
}
}
@@ -7451,45 +7398,31 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Assert(eq, "Unexpected initial map for Array function");
}
- if (FLAG_optimize_constructed_arrays) {
- // Figure out the right elements kind
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
-
- if (FLAG_debug_code) {
- Label done;
- __ cmp(r3, Operand(FAST_ELEMENTS));
- __ b(eq, &done);
- __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
- __ Assert(eq,
- "Invalid ElementsKind for InternalArray or InternalPackedArray");
- __ bind(&done);
- }
+ // Figure out the right elements kind
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the map's "bit field 2" into r3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
- Label fast_elements_case;
+ if (FLAG_debug_code) {
+ Label done;
__ cmp(r3, Operand(FAST_ELEMENTS));
- __ b(eq, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ b(eq, &done);
+ __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
+ __ Assert(eq,
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ __ bind(&done);
+ }
- __ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ Label fast_elements_case;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
}
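
The Ubfx above is a plain unsigned bit-field extract; over the loaded bit-field-2 byte it computes the equivalent of (a sketch):

    int elements_kind =
        (bit_field2 >> Map::kElementsKindShift) &
        ((1 << Map::kElementsKindBitCount) - 1);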
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 863848cc37..1f663f52e9 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -585,7 +585,6 @@ class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ExternalReference function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 5b2980aeb4..60de5fc4f7 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index a13048476d..7d59a84b1d 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "constants-arm.h"
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 101cd9f143..8766a24bb2 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -32,7 +32,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "cpu.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 2f0a7c4e54..7faea08034 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "debug.h"
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index f55552df5b..6101bec947 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -56,7 +56,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "constants-arm.h"
#include "disasm.h"
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index f5a7dbd3ee..b2071807d2 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -27,12 +27,12 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "assembler.h"
#include "assembler-arm.h"
#include "assembler-arm-inl.h"
-#include "frames-inl.h"
+#include "frames.h"
#include "macro-assembler.h"
#include "macro-assembler-arm.h"
@@ -40,11 +40,6 @@ namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 8b24bf10c9..41f02be259 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "code-stubs.h"
#include "codegen.h"
@@ -129,7 +129,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -327,9 +327,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
@@ -345,7 +345,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
__ mov(r2, Operand(profiling_counter_));
__ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
@@ -361,7 +361,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
@@ -404,7 +404,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -1164,15 +1164,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
+ __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1694,10 +1692,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(r2, Operand(key->handle()));
+ __ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1831,20 +1829,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
__ push(r0);
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ ldr(r6, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
__ str(result_register(), FieldMemOperand(r1, offset));
// Update the write barrier for the array store.
@@ -1852,10 +1848,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
__ mov(r3, Operand(Smi::FromInt(i)));
- __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
@@ -1864,6 +1857,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
+ __ pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(r0);
@@ -1991,22 +1985,37 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::INITIAL:
- case Yield::SUSPEND: {
- VisitForStackValue(expr->generator_object());
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r1);
+ __ b(eq, &post_runtime);
+ __ push(r0); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- Label resume;
- __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
- __ b(ne, &resume);
- if (expr->yield_kind() == Yield::SUSPEND) {
- EmitReturnIteratorResult(false);
- } else {
- __ pop(result_register());
- EmitReturnSequence();
- }
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
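
A note on the new suspend protocol above: before calling Runtime::kSuspendJSGeneratorObject, the generated code stores the resume offset (continuation.pos() as a Smi) and the current context into the generator object itself. A minimal standalone sketch of that idea in plain C++ (not V8 internals; Generator and resume are illustrative names only):

#include <iostream>

// The generator object stores a "continuation" -- where to re-enter --
// plus any live state; resume() jumps back to that point.
struct Generator {
  int continuation = 0;  // analogous to JSGeneratorObject::kContinuationOffset
  int i = 0;             // live state saved across suspensions

  int resume() {  // returns the next value, or -1 once closed
    switch (continuation) {
      case 0:
        for (i = 1; i <= 3; ++i) {
          continuation = 1;  // record the resume point before suspending
          return i;          // "suspend": hand a value back to the caller
          case 1:;           // "resume": execution re-enters here
        }
        continuation = -1;   // generator closed; fall through
      default:
        return -1;
    }
    return -1;
  }
};

int main() {
  Generator g;
  for (int v = g.resume(); v != -1; v = g.resume()) std::cout << v << "\n";
}
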
@@ -2018,7 +2027,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
- EmitReturnIteratorResult(true);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
break;
}
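
The FINAL case now boxes the return value with EmitCreateIteratorResult(true) before unwinding. At the JS level this builds the { value, done } object the iterator protocol requires; a hedged stand-in in plain C++ (IteratorResult and CreateIteratorResult are illustrative names, not V8's object layout):

#include <iostream>

struct IteratorResult {
  int value;  // stand-in for the yielded or returned JS value
  bool done;  // false for Yield::SUSPEND, true for the final yield
};

// Mirrors the role of EmitCreateIteratorResult(bool done): take the value
// popped from the stack and box it together with the done flag.
IteratorResult CreateIteratorResult(int value, bool done) {
  return IteratorResult{value, done};
}

int main() {
  IteratorResult last = CreateIteratorResult(42, /*done=*/true);
  std::cout << last.value << (last.done ? " (done)" : "") << "\n";
}
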
@@ -2029,76 +2041,66 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&l_next);
- // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(r3); // iter
__ push(r0); // exception
- __ mov(r0, r3); // iter
- __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
- Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(throw_ic); // iter.throw in r0
__ jmp(&l_call);
- // try { received = yield result.value }
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
__ bind(&l_try);
- __ pop(r0); // result.value
+ __ pop(r0); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(r0); // result.value
- __ ldr(r3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size)); // g
- __ push(r3); // g
+ __ push(r0); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ ldr(r0, MemOperand(sp, generator_object_depth));
+ __ push(r0); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- __ b(ne, &l_resume);
- EmitReturnIteratorResult(false);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(r0); // result
+ EmitReturnSequence();
__ bind(&l_resume); // received in r0
__ PopTryHandler();
- // receiver = iter; f = iter.next; arg = received;
+ // receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
+ __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(r3); // iter
__ push(r0); // received
- __ mov(r0, r3); // iter
- __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
- Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(next_ic); // iter.next in r0
- // result = f.call(receiver, arg);
+ // result = receiver[f](arg);
__ bind(&l_call);
- Label l_call_runtime;
- __ JumpIfSmi(r0, &l_call_runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &l_call_runtime);
- __ mov(r1, r0);
- ParameterCount count(1);
- __ InvokeFunction(r1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&l_loop);
- __ bind(&l_call_runtime);
- __ push(r0);
- __ CallRuntime(Runtime::kCall, 3);
- // val = result.value; if (!result.done) goto l_try;
+ // if (!result.done) goto l_try;
__ bind(&l_loop);
- // result.value
__ push(r0); // save result
- __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in r0
- __ pop(r1); // result
- __ push(r0); // result.value
- __ mov(r0, r1); // result
__ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in r0
@@ -2108,7 +2110,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ b(eq, &l_try);
// result.value
- __ pop(r0); // result.value
+ __ pop(r0); // result
+ __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in r0
context()->DropAndPlug(2, r0); // drop iter and g
break;
}
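
The delegating-yield rewrite above replaces the property load plus Runtime::kCall sequence with a single keyed call, result = receiver[f](arg), where f is "next" or "throw", and loads result.value only after result.done becomes true. A rough standalone model in plain C++ (simplified: no try handler, no re-entry; all names illustrative):

#include <iostream>
#include <string>

// f names the method ("next" or "throw"); the loop keeps driving the
// inner iterator until result.done is true.
struct Result { int value; bool done; };

struct Iter {
  int i = 0;
  Result next(int /*received*/) { ++i; return Result{i * 10, i > 3}; }
  Result throw_(int e) { return Result{e, true}; }  // closes the iterator
};

int main() {
  Iter iter;
  std::string f = "next";
  int arg = 0;  // initial send value; V8 uses undefined
  Result result;
  do {
    // result = receiver[f](arg); -- the keyed call the hunk emits
    result = (f == "throw") ? iter.throw_(arg) : iter.next(arg);
    arg = result.value;  // value received back for the next round
  } while (!result.done);
  std::cout << "result.value after done: " << result.value << "\n";
}
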
@@ -2149,7 +2154,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ sub(r3, r3, Operand(1), SetCC);
+ __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
__ b(mi, &push_frame);
__ push(r2);
__ jmp(&push_argument_holes);
@@ -2214,13 +2219,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
-void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ mov(r1, Operand(map));
@@ -2240,33 +2252,13 @@ void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
// root set.
__ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-
- if (done) {
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- }
-
- EmitReturnSequence();
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&allocated);
}
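
EmitCreateIteratorResult now keeps its GC fallback inline: the fast-path Allocate branches to a gc_required block that calls Runtime::kAllocateInNewSpace, and both paths rejoin at the allocated label, leaving the function with a single exit. A generic sketch of that fast-path/slow-path shape (hypothetical helpers, plain C++, not the MacroAssembler API):

#include <cstdlib>
#include <iostream>

static char arena[256];
static std::size_t arena_top = 0;

void* TryBumpAllocate(std::size_t size) {     // fast path: may fail
  if (arena_top + size > sizeof(arena)) return nullptr;
  void* p = arena + arena_top;
  arena_top += size;
  return p;
}

void* AllocateViaRuntime(std::size_t size) {  // slow path: always succeeds
  return std::malloc(size);
}

void* AllocateObject(std::size_t size) {
  void* p = TryBumpAllocate(size);  // __ Allocate(..., &gc_required, ...)
  if (p == nullptr) {               // gc_required: call into the runtime
    p = AllocateViaRuntime(size);
  }
  return p;                         // allocated: single join point
}

int main() {
  std::cout << AllocateObject(64) << "\n";
  std::cout << AllocateObject(512) << "\n";  // too big: takes the slow path
}
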
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
+ __ mov(r2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
@@ -2421,7 +2413,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ mov(r1, r0);
__ pop(r0); // Restore value.
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2550,7 +2542,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
Handle<Code> ic = is_classic_mode()
@@ -2683,8 +2675,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ mov(r2, Operand(cell));
@@ -2825,7 +2816,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2879,8 +2870,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ mov(r2, Operand(cell));
@@ -3434,7 +3424,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3853,7 +3843,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
@@ -4526,7 +4516,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 87865b2f67..89ebfde668 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "assembler-arm.h"
#include "code-stubs.h"
@@ -321,7 +321,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
__ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
__ b(eq, index_string);
- // Is the string internalized?
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
@@ -1581,8 +1582,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index fbb9c6ef8b..b08353e069 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -41,24 +41,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as temporaries and
@@ -81,14 +63,6 @@ void LInstruction::VerifyCall() {
#endif
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -352,8 +326,7 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
+ ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -451,7 +424,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -929,7 +902,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -940,13 +913,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
+ bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
+ needs_arguments_object_materialization = true;
op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
@@ -958,6 +933,21 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ if (needs_arguments_object_materialization) {
+ HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+ ? graph()->GetArgumentsObject()
+ : hydrogen_env->entry()->arguments_object();
+ ASSERT(arguments->IsLinked());
+ for (int i = 1; i < arguments->arguments_count(); ++i) {
+ HValue* value = arguments->arguments_values()->at(i);
+ ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+ LOperand* op = UseAny(value);
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -982,10 +972,13 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
- // deoptimization environment.
+ // deoptimization environment. If the instruction is generic, no
+ // environment is needed since all cases are handled.
Representation rep = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+ !expected.IsGeneric()) {
return AssignEnvironment(result);
}
return result;
@@ -1386,19 +1379,6 @@ bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
}
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
if (CpuFeatures::IsSupported(SUDIV)) {
// A value with an integer representation does not need to be transformed.
@@ -1456,7 +1436,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->has_fixed_right_arg()) {
+ } else if (instr->fixed_right_arg().has_value) {
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseRegisterAtStart(right));
return AssignEnvironment(DefineAsRegister(mod));
@@ -1816,13 +1796,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -2019,7 +1992,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
@@ -2416,6 +2389,14 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
+ LAllocateObject* result =
+ new(zone()) LAllocateObject(TempRegister(), TempRegister());
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2588,8 +2569,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
undefined,
instr->inlining_kind(),
instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ // Only replay binding of arguments object if it wasn't removed from the graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index ccfd0dbece..39cab8fdb3 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -98,7 +99,6 @@ class LCodeGen;
V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
- V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -489,17 +489,44 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
virtual bool IsControl() const { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
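
TrueLabel/FalseLabel above memoize the chunk lookup: the first call resolves and caches the assembly label, and later calls return the cached pointer. A standalone sketch of the pattern (Label and Chunk here are trivial stand-ins, not the Lithium types):

#include <iostream>

struct Label { int pos; };

struct Chunk {
  Label labels[2] = {{10}, {20}};
  Label* GetAssemblyLabel(int block) { return &labels[block]; }
};

class ControlNode {
 public:
  Label* TrueLabel(Chunk* chunk) {
    if (true_label_ == nullptr) {
      true_label_ = chunk->GetAssemblyLabel(/*TrueDestination*/ 0);
    }
    return true_label_;  // later calls reuse the cached pointer
  }

 private:
  Label* true_label_ = nullptr;
};

int main() {
  Chunk chunk;
  ControlNode node;
  std::cout << node.TrueLabel(&chunk)->pos << "\n";  // 10, cached thereafter
}
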
@@ -1236,7 +1263,7 @@ class LBranch: public LControlInstruction<1, 0> {
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch: public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1249,29 +1276,7 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
@@ -2401,6 +2406,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
@@ -2444,6 +2450,21 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LAllocateObject(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2546,26 +2567,10 @@ class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
- LOsrEntry();
+ LOsrEntry() {}
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2692,7 +2697,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 96befb0c0d..272db157a8 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -62,7 +62,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
@@ -87,20 +87,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
- for (int i = 0 ; i < transition_maps_.length(); i++) {
- transition_maps_.at(i)->AddDependentCode(
- DependentCode::kTransitionGroup, code);
- }
- if (graph()->depends_on_empty_array_proto_elements()) {
- isolate()->initial_object_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- isolate()->initial_array_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- }
+ info()->CommitDependencies(code);
}
@@ -593,27 +580,15 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
+ WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
@@ -645,60 +620,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
+
+ // TODO(mstarzinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed by the deoptimizer. Currently
+ // this is only used for the arguments object.
+ if (value == NULL) {
+ int arguments_count = environment->values()->length() - translation_size;
+ translation->BeginArgumentsObject(arguments_count);
+ for (int i = 0; i < arguments_count; ++i) {
+ LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
+ value,
+ environment->HasTaggedValueAt(translation_size + i),
+ environment->HasUint32ValueAt(translation_size + i));
}
+ continue;
}
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
+ environment->HasUint32ValueAt(i));
}
}
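
With spill tracking gone, the rewritten WriteTranslation treats a NULL operand as the marker for a materialized arguments object: regular values occupy indices [0, translation_size), and the values needed to rebuild the object are the ones CreateEnvironment appended at [translation_size, length). An illustrative sketch of that split (plain C++, toy data):

#include <iostream>
#include <vector>

int main() {
  std::vector<const char*> values =
      {"receiver", nullptr, "local0", "arg_val0", "arg_val1"};
  int translation_size = 3;  // environment->translation_size()
  for (int i = 0; i < translation_size; ++i) {
    if (values[i] == nullptr) {  // marker for the arguments object
      int arguments_count =
          static_cast<int>(values.size()) - translation_size;
      std::cout << "BeginArgumentsObject(" << arguments_count << ")\n";
      for (int j = 0; j < arguments_count; ++j) {
        std::cout << "  add " << values[translation_size + j] << "\n";
      }
      continue;
    }
    std::cout << "add " << values[i] << "\n";
  }
}
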
@@ -706,17 +650,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ bool is_uint32) {
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -823,8 +758,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -832,7 +765,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -865,7 +798,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_trap_on_deopt) {
+ if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
__ stop("trap_on_deopt", cc);
}
@@ -1160,7 +1093,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ // Record the address of the first unknown OSR value as the place to enter.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
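
DoUnknownOSRValue now pins the OSR entry point with a record-once idiom: only the first unknown OSR value stores the assembler's pc offset, and later ones leave it untouched. A trivial standalone illustration (counters stand in for the real assembler state):

#include <iostream>

int main() {
  int osr_pc_offset = -1;
  int pc_offsets[] = {12, 24, 36};  // pc at each LUnknownOSRValue
  for (int pc : pc_offsets) {
    if (osr_pc_offset == -1) osr_pc_offset = pc;  // keep the first only
  }
  std::cout << "OSR entry pc offset: " << osr_pc_offset << "\n";  // 12
}
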
@@ -1194,12 +1128,12 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(result_reg, left_reg, Operand(divisor - 1));
__ bind(&done);
- } else if (hmod->has_fixed_right_arg()) {
+ } else if (hmod->fixed_right_arg().has_value) {
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
- int32_t divisor = hmod->fixed_right_arg_value();
+ int32_t divisor = hmod->fixed_right_arg().value;
ASSERT(IsPowerOf2(divisor));
// Check if our assumption of a fixed right operand still holds.
@@ -1912,13 +1846,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->value());
@@ -1946,10 +1873,12 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register map = ToRegister(instr->temp());
Label done;
- // If the object is a smi return the object.
- __ SmiTst(input);
- __ Move(result, input, eq);
- __ b(eq, &done);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi return the object.
+ __ SmiTst(input);
+ __ Move(result, input, eq);
+ __ b(eq, &done);
+ }
// If the object is not a value type, return the object.
__ CompareObjectType(input, map, map, JS_VALUE_TYPE);
@@ -2199,11 +2128,12 @@ int LCodeGen::GetNextEmittedBlock() const {
return -1;
}
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
int next_block = GetNextEmittedBlock();
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
@@ -2224,22 +2154,19 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ cmp(reg, Operand::Zero());
- EmitBranch(true_block, false_block, ne);
+ EmitBranch(instr, ne);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
DwVfpRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
__ VFPCompareAndSetFlags(reg, 0.0);
- __ cmp(r0, r0, vs); // If NaN, set the Z flag.
- EmitBranch(true_block, false_block, ne);
+ __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
+ EmitBranch(instr, ne);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
@@ -2247,42 +2174,55 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (type.IsBoolean()) {
ASSERT(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
} else if (type.IsSmi()) {
ASSERT(!info()->IsStub());
__ cmp(reg, Operand::Zero());
- EmitBranch(true_block, false_block, ne);
+ EmitBranch(instr, ne);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, al);
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ DwVfpRegister dbl_scratch = double_scratch0();
+ __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+ __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
+ EmitBranch(instr, ne);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ cmp(ip, Operand::Zero());
+ EmitBranch(instr, ne);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
+ __ b(eq, instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ cmp(reg, Operand::Zero());
- __ b(eq, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ b(eq, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
@@ -2297,14 +2237,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
+ __ b(ne, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, true_label);
+ __ b(ge, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2314,15 +2254,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(ge, &not_string);
__ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
__ cmp(ip, Operand::Zero());
- __ b(ne, true_label);
- __ b(false_label);
+ __ b(ne, instr->TrueLabel(chunk_));
+ __ b(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
if (expected.Contains(ToBooleanStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
- __ b(eq, true_label);
+ __ b(eq, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -2334,13 +2274,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(dbl_scratch, 0.0);
__ cmp(r0, r0, vs); // NaN -> false.
- __ b(eq, false_label); // +0, -0 -> false.
- __ b(true_label);
+ __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
+ __ b(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr->environment());
+ }
}
}
}
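
The key behavioral change in DoBranch is the last hunk: the catch-all deopt is emitted only while the recorded ToBoolean type set is not yet generic, since a generic set means every case is already handled inline. A sketch of that rule with an illustrative bitset (not V8's ToBooleanStub::Types):

#include <bitset>
#include <iostream>

enum Type { UNDEFINED, BOOLEAN, SMI, STRING, NUM_TYPES };

struct Types {
  std::bitset<NUM_TYPES> seen;
  bool IsGeneric() const { return seen.all(); }  // every case observed
};

int main() {
  Types expected;
  expected.seen.set(UNDEFINED).set(BOOLEAN);
  if (!expected.IsGeneric()) {
    std::cout << "unseen type at runtime -> deoptimize\n";
  } else {
    std::cout << "generic: handle every case inline, never deopt\n";
  }
}
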
@@ -2348,7 +2291,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block) {
if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
@@ -2389,17 +2332,14 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cond = TokenToCondition(instr->op(), false);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2408,7 +2348,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ __ b(vs, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
int32_t value = ToInteger32(LConstantOperand::cast(right));
@@ -2430,7 +2370,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
__ cmp(ToRegister(left), ToRegister(right));
}
}
- EmitBranch(true_block, false_block, cond);
+ EmitBranch(instr, cond);
}
}
@@ -2438,21 +2378,17 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
__ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
__ cmp(left, Operand(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -2487,22 +2423,21 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
Condition true_cond =
- EmitIsObject(reg, temp1, false_label, true_label);
+ EmitIsObject(reg, temp1,
+ instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
__ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
return lt;
@@ -2513,24 +2448,20 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
- EmitIsString(reg, temp1, false_label);
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Register input_reg = EmitLoadRegister(instr->value(), ip);
__ SmiTst(input_reg);
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -2538,14 +2469,13 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ tst(temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
+ EmitBranch(instr, ne);
}
@@ -2571,8 +2501,6 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2581,7 +2509,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Condition condition = ComputeCompareCondition(op);
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -2609,15 +2537,12 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
@@ -2637,13 +2562,10 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
Register input = ToRegister(instr->value());
Register scratch = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -2720,27 +2642,20 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
__ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(temp, Operand(instr->map()));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -2802,10 +2717,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch with
// the cached map.
PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
// We use Factory::the_hole_value() on purpose instead of loading from the
@@ -2965,7 +2879,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
@@ -3000,13 +2914,13 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload (CompareRoot might clobber ip).
Register payload = ToRegister(instr->temp());
- __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment());
}
// Store the value.
- __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
@@ -3060,9 +2974,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
@@ -4193,12 +4107,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- if (FLAG_optimize_constructed_arrays) {
- // No cell in r2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ mov(r2, Operand(undefined_value));
- }
+ // No cell in r2 for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ mov(r2, Operand(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4207,22 +4118,42 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- ASSERT(FLAG_optimize_constructed_arrays);
__ mov(r0, Operand(instr->arity()));
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
- bool disable_allocation_sites =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // A packed elements kind may need to become holey at runtime;
+ // look at the first argument to decide.
+ __ ldr(r5, MemOperand(sp, 0));
+ __ cmp(r5, Operand::Zero());
+ __ b(eq, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
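
For the single-argument case above: with a packed elements kind, new Array(n) must produce a holey array whenever n != 0, so the generated code inspects the first argument and picks the holey stub dynamically. The decision, reduced to a standalone sketch (enum values are illustrative stand-ins for V8's ElementsKind):

#include <iostream>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS };

ElementsKind GetHoleyElementsKind(ElementsKind) {
  return FAST_HOLEY_SMI_ELEMENTS;
}

ElementsKind KindForSingleArgument(ElementsKind kind, int first_arg) {
  // first_arg == 0 -> packed case; anything else leaves holes.
  return first_arg == 0 ? kind : GetHoleyElementsKind(kind);
}

int main() {
  std::cout << KindForSingleArgument(FAST_SMI_ELEMENTS, 0) << " "
            << KindForSingleArgument(FAST_SMI_ELEMENTS, 5) << "\n";  // 0 1
}
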
@@ -4267,9 +4198,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
if (!transition.is_null()) {
- if (transition->CanBeDeprecated()) {
- transition_maps_.Add(transition, info()->zone());
- }
__ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
@@ -4289,9 +4217,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
ASSERT(!object.is(value));
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
__ str(value, FieldMemOperand(object, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4500,9 +4428,9 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ str(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ add(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
@@ -5224,9 +5152,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr->environment());
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ SmiTst(ToRegister(input));
+ DeoptimizeIf(eq, instr->environment());
+ }
}
@@ -5279,10 +5209,9 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
__ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
__ cmp(reg, Operand(target));
@@ -5382,11 +5311,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
@@ -5396,6 +5321,80 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Register scratch2 = ToRegister(instr->temp2());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
+ TAG_OBJECT);
+
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(map, constructor);
+ __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+ __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ if (initial_map->inobject_properties() != 0) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ str(scratch, FieldMemOperand(result, property_offset));
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ mov(r0, Operand(Smi::FromInt(instance_size)));
+ __ push(r0);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5554,17 +5553,13 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
+ Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_),
input,
instr->type_literal());
if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -5649,11 +5644,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -5809,15 +5802,15 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ // Normally we record the first unknown OSR value as the entrypoint to the OSR
+ // code, but if there were none, record the entrypoint here.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
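
The DoAllocateObject/DoDeferredAllocateObject pair above is Lithium's standard deferred-code shape: try an inline bump allocation in new-space, jump to deferred code that calls the runtime when that fails, and rejoin at deferred->exit() to initialize the map and fields. A minimal standalone sketch of that control flow, with every name hypothetical and no V8 API used:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical bump allocator standing in for new-space.
    static unsigned char heap[4096];
    static size_t heap_top = 0;

    // Fast path, the analogue of __ Allocate(...): bump or bail out.
    static void* TryAllocateFast(size_t size) {
      if (heap_top + size > sizeof(heap)) return NULL;
      void* result = heap + heap_top;
      heap_top += size;
      return result;
    }

    // Slow path, the analogue of Runtime::kAllocateInNewSpace reached
    // through deferred->entry().
    static void* AllocateRuntime(size_t size) { return std::calloc(1, size); }

    static void* AllocateObject(size_t instance_size) {
      void* result = TryAllocateFast(instance_size);
      if (result == NULL) result = AllocateRuntime(instance_size);
      // Both paths rejoin here (deferred->exit()): the generated code now
      // writes the map, the empty fixed arrays, and undefined into each
      // in-object property slot.
      std::memset(result, 0, instance_size);
      return result;
    }

    int main() {
      void* obj = AllocateObject(32);
      std::printf("object at %p\n", obj);
    }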
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index f264259f0a..075fb416c7 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -56,8 +56,6 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -81,7 +79,6 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
- // TODO(svenpanne) Use this consistently.
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -153,6 +150,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -173,10 +171,7 @@ class LCodeGen BASE_EMBEDDED {
int additional_offset);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -295,10 +290,7 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
+ bool is_uint32);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -327,7 +319,8 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(Register input,
DwVfpRegister result,
bool allow_undefined_as_nan,
@@ -356,7 +349,8 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
@@ -406,8 +400,6 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
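
The header change above replaces EmitBranch(int, int, Condition) with a template over the instruction type: any control instruction that can hand back its own true/false labels is branched on uniformly. A sketch of that shape (signatures abridged, names hypothetical; the real code fetches labels via TrueLabel(chunk_)/FalseLabel(chunk_)):

    #include <cstdio>

    struct Label { const char* name; };

    // Stand-in for a Lithium control instruction exposing its own labels.
    struct FakeBranchInstr {
      Label true_label = {"L_true"};
      Label false_label = {"L_false"};
      Label* TrueLabel() { return &true_label; }
      Label* FalseLabel() { return &false_label; }
    };

    // One template replaces the old (left_block, right_block, cc) overload.
    template <class InstrType>
    void EmitBranch(InstrType* instr, bool condition_holds) {
      Label* target =
          condition_holds ? instr->TrueLabel() : instr->FalseLabel();
      std::printf("b %s\n", target->name);
    }

    int main() {
      FakeBranchInstr instr;
      EmitBranch(&instr, true);   // prints "b L_true"
      EmitBranch(&instr, false);  // prints "b L_false"
    }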
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index f3cfdc76a9..cce20ffd6a 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -29,10 +29,11 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "runtime.h"
@@ -400,10 +401,9 @@ void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
} else {
mov(result, Operand(object));
}
@@ -986,19 +986,19 @@ void MacroAssembler::InitializeNewString(Register string,
int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
+#else // V8_HOST_ARCH_ARM
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_ARM)
+#endif // V8_HOST_ARCH_ARM
}
@@ -2245,6 +2245,9 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset) {
@@ -2275,11 +2278,31 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
+ ASSERT(!thunk_last_arg.is(r3));
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
+ ldrb(r3, MemOperand(r3, 0));
+ cmp(r3, Operand(0));
+ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
+ mov(r3, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(r3, Operand(function));
+ bind(&end_profiler_check);
+
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
- stub.GenerateCall(this, function);
+ stub.GenerateCall(this, r3);
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
@@ -3067,6 +3090,16 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
}
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+ cmp(reg, Operand(kInternalizedTag));
+ b(lt, not_unique_name);
+ cmp(reg, Operand(SYMBOL_TYPE));
+ b(gt, not_unique_name);
+}
+
+
// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
@@ -3371,7 +3404,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
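
The new block in CallApiFunctionAndReturn reads the profiler's one-byte is_profiling flag and either calls the API function directly or routes through a thunk that receives the real callback as an extra trailing argument. The same dispatch in plain C++ (a sketch; all names hypothetical):

    #include <cstdint>
    #include <cstdio>

    typedef void (*ApiCallback)(int arg);

    // One-byte flag, mirroring CpuProfiler::is_profiling_address().
    static uint8_t is_profiling_flag = 0;

    static void MyCallback(int arg) { std::printf("callback(%d)\n", arg); }

    // Thunk: same arguments plus the real callback, so the profiler can
    // log the call before forwarding (the PROFILING_* reference types).
    static void ProfilingThunk(int arg, ApiCallback callback) {
      std::printf("profiler: entering callback\n");
      callback(arg);
    }

    static void CallApiFunction(ApiCallback callback, int arg) {
      if (is_profiling_flag == 0) {
        callback(arg);                  // profiler disabled: direct call
      } else {
        ProfilingThunk(arg, callback);  // profiler enabled: via the thunk
      }
    }

    int main() {
      CallApiFunction(&MyCallback, 1);  // direct
      is_profiling_flag = 1;
      CallApiFunction(&MyCallback, 2);  // through the thunk
    }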
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 11d3066b91..b76ebd590e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -1085,6 +1085,9 @@ class MacroAssembler: public Assembler {
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
void CallApiFunctionAndReturn(ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset_from_fp);
@@ -1293,6 +1296,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* failure);
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
// ---------------------------------------------------------------------------
// Patching helpers.
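
JumpIfNotUniqueName accepts exactly the instance types from kInternalizedTag up to SYMBOL_TYPE, i.e. internalized strings plus symbols. The equivalent predicate, with constants assumed purely for illustration (the real values live in objects.h):

    #include <cstdio>

    // Assumed values for illustration only.
    const int kInternalizedTag = 0x00;
    const int SYMBOL_TYPE = 0x41;

    // Mirrors the two emitted compare-and-branch pairs:
    //   cmp reg, kInternalizedTag; b(lt) -> not a unique name
    //   cmp reg, SYMBOL_TYPE;      b(gt) -> not a unique name
    bool IsUniqueName(int instance_type) {
      if (instance_type < kInternalizedTag) return false;
      if (instance_type > SYMBOL_TYPE) return false;
      return true;
    }

    int main() {
      std::printf("%d %d\n", IsUniqueName(0x00), IsUniqueName(0x80));  // 1 0
    }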
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index f05cba521e..189ea8d777 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
+#include "cpu-profiler.h"
#include "unicode.h"
#include "log.h"
#include "code-stubs.h"
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index c9db167b0e..238632aea0 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -30,7 +30,7 @@
#include <cstdarg>
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "disasm.h"
#include "assembler.h"
@@ -830,7 +830,10 @@ class Redirection {
Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
}
return new Redirection(external_function, type);
}
@@ -1629,12 +1632,19 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// (refer to InvocationCallback in v8.h).
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
+ int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
int32_t arg1);
typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
int32_t arg1);
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
+ int32_t arg0, int32_t arg1, int32_t arg2);
+typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
+ int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1799,6 +1809,31 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
target(arg0);
}
} else if (
+ redirection->type() == ExternalReference::PROFILING_API_CALL ||
+ redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ v8::Handle<v8::Value> result = target(arg0, arg1);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, reinterpret_cast<int32_t>(*result));
+ } else {
+ SimulatorRuntimeProfilingApiCallNew target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
+ target(arg0, arg1);
+ }
+ } else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1823,6 +1858,32 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
target(arg0, arg1);
}
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ v8::Handle<v8::Value> result = target(arg0, arg1, arg2);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, reinterpret_cast<int32_t>(*result));
+ } else {
+ SimulatorRuntimeProfilingGetterCallNew target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(
+ external);
+ target(arg0, arg1, arg2);
+ }
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -1830,7 +1891,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
- "Call to host function at %p"
+ "Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
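
On the simulator side, each new PROFILING_* redirection recovers a typed function pointer from the raw external address and forwards the extra callback argument. Stripped of the tracing and stack-alignment checks, the dispatch is just a cast and a call:

    #include <cstdint>
    #include <cstdio>

    // Matches the typedef added above: two raw int32 arguments, the second
    // being the address of the real callback in the API-call case.
    typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0,
                                                        int32_t arg1);

    static void SampleTarget(int32_t arg0, int32_t arg1) {
      std::printf("target(%d, %d)\n", arg0, arg1);
    }

    // What the PROFILING_API_CALL_NEW branch of SoftwareInterrupt boils
    // down to once the trace output is removed.
    static void Dispatch(intptr_t external, int32_t arg0, int32_t arg1) {
      SimulatorRuntimeProfilingApiCallNew target =
          reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
      target(arg0, arg1);
    }

    int main() {
      Dispatch(reinterpret_cast<intptr_t>(&SampleTarget), 7, 8);
    }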
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 3595b5233f..c154f9add4 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "ic-inl.h"
#include "codegen.h"
@@ -427,12 +427,10 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<Name> name,
Register scratch,
Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
+ Handle<Cell> cell = GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(cell));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
@@ -514,7 +512,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register storage_reg = name_reg;
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (details.type() == CONSTANT_FUNCTION) {
+ Handle<HeapObject> constant(
+ HeapObject::cast(descriptors->GetValue(descriptor)));
+ __ LoadHeapObject(scratch1, constant);
+ __ cmp(value_reg, scratch1);
+ __ b(ne, miss_restore_name);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_restore_name);
@@ -543,7 +547,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (object->map()->unused_property_fields() == 0) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@@ -572,6 +577,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
+ if (details.type() == CONSTANT_FUNCTION) {
+ ASSERT(value_reg.is(r0));
+ __ Ret();
+ return;
+ }
+
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
@@ -958,8 +969,22 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference ref = ExternalReference(&fun,
type,
masm->isolate());
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeInvocationCallback)
+ : FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type =
+ returns_handle ?
+ ExternalReference::PROFILING_API_CALL :
+ ExternalReference::PROFILING_API_CALL_NEW;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
AllowExternalCallThatCantCauseGC scope(masm);
__ CallApiFunctionAndReturn(ref,
+ function_address,
+ thunk_ref,
+ r1,
kStackUnwindSpace,
returns_handle,
kFastApiCallArguments + 1);
@@ -1456,14 +1481,28 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
Address getter_address = v8::ToCData<Address>(callback->getter());
bool returns_handle =
!CallbackTable::ReturnsVoid(isolate(), getter_address);
+
ApiFunction fun(getter_address);
ExternalReference::Type type =
returns_handle ?
ExternalReference::DIRECT_GETTER_CALL :
ExternalReference::DIRECT_GETTER_CALL_NEW;
-
ExternalReference ref = ExternalReference(&fun, type, isolate());
+
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeAccessorGetter)
+ : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ returns_handle ?
+ ExternalReference::PROFILING_GETTER_CALL :
+ ExternalReference::PROFILING_GETTER_CALL_NEW;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ isolate());
__ CallApiFunctionAndReturn(ref,
+ getter_address,
+ thunk_ref,
+ r2,
kStackUnwindSpace,
returns_handle,
5);
@@ -1586,12 +1625,12 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r3, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
@@ -1659,12 +1698,61 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+  // Check that the function is still the Array function.
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+ Register receiver = r1;
+
+ if (cell.is_null()) {
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0,
+ r4, name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+ Handle<Cell> kind_feedback_cell =
+ isolate()->factory()->NewCell(kind);
+ __ mov(r0, Operand(argc));
+ __ mov(r2, Operand(kind_feedback_cell));
+ __ mov(r1, Operand(function));
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1908,16 +1996,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1990,16 +2079,17 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2072,16 +2162,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2155,16 +2246,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2227,16 +2319,17 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2335,16 +2428,17 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2433,7 +2527,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2441,7 +2535,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
Counters* counters = isolate()->counters();
@@ -2613,8 +2707,9 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<JSFunction> function) {
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, Handle<String>::cast(name));
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT_FUNCTION);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2671,7 +2766,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
// ----------- S t a t e -------------
@@ -2680,7 +2775,8 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name));
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2844,7 +2940,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<Code> StoreStubCompiler::CompileStoreGlobal(
Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name) {
Label miss;
@@ -2859,14 +2955,12 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// global object. We bail out to the runtime system to do that.
__ mov(scratch1(), Operand(cell));
__ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
- __ ldr(scratch3(),
- FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
+ __ ldr(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
__ cmp(scratch3(), scratch2());
__ b(eq, &miss);
// Store the value in the cell.
- __ str(value(),
- FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
+ __ str(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
Counters* counters = isolate()->counters();
@@ -2990,7 +3084,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
Label success, miss;
@@ -3002,7 +3096,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// Get the value from the cell.
__ mov(r3, Operand(cell));
- __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
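
All of the JSGlobalPropertyCell-to-Cell renames above keep the same access pattern: the stub embeds a handle to the cell and then reads or writes the boxed value at Cell::kValueOffset, so the stored global can change without regenerating code. In struct form (a stand-in, not the real object layout):

    #include <cstdio>

    // Minimal stand-in for a V8 Cell: a heap box holding one tagged value.
    struct Cell {
      void* value;  // the single field the stubs read and write
    };

    void* LoadGlobal(Cell* cell) {
      // __ mov(r3, Operand(cell));
      // __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset));
      return cell->value;
    }

    void StoreGlobal(Cell* cell, void* new_value) {
      // __ str(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
      cell->value = new_value;
    }

    int main() {
      int boxed = 42;
      Cell cell = { &boxed };
      std::printf("%d\n", *static_cast<int*>(LoadGlobal(&cell)));
    }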
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index 2b0c3dd85b..4a4f570146 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -31,12 +31,12 @@ var $ArrayBuffer = global.ArrayBuffer;
// -------------------------------------------------------------------
-function ArrayBufferConstructor(byteLength) { // length = 1
+function ArrayBufferConstructor(length) { // length = 1
if (%_IsConstructCall()) {
- var l = TO_POSITIVE_INTEGER(byteLength);
- %ArrayBufferInitialize(this, l);
+ var byteLength = ToPositiveInteger(length, 'invalid_array_buffer_length');
+ %ArrayBufferInitialize(this, byteLength);
} else {
- return new $ArrayBuffer(byteLength);
+ throw MakeTypeError('constructor_not_function', ["ArrayBuffer"]);
}
}
@@ -70,6 +70,9 @@ function ArrayBufferSlice(start, end) {
fin = MathMin(relativeEnd, this.byteLength);
}
+ if (fin < first) {
+ fin = first;
+ }
var newLen = fin - first;
// TODO(dslomov): implement inheritance
var result = new $ArrayBuffer(newLen);
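
The added guard in ArrayBufferSlice pins fin at first, so a slice whose clamped start lies past its end gets length zero instead of a negative newLen. An integer model of the final clamping steps (a sketch, not the full relative-index handling):

    #include <algorithm>
    #include <cstdio>

    int SliceLength(int byte_length, int first, int fin) {
      fin = std::min(fin, byte_length);  // fin = MathMin(relativeEnd, len)
      if (fin < first) fin = first;      // the new guard: newLen >= 0
      return fin - first;                // newLen
    }

    int main() {
      std::printf("%d\n", SliceLength(16, 12, 4));  // 0, not -8
      std::printf("%d\n", SliceLength(16, 4, 12));  // 8
    }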
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 6b0c4b8456..b669e0911f 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -724,7 +724,7 @@ bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// generation.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::kApplyMask;
RelocIterator it(desc, mode_mask);
return !it.done();
@@ -754,8 +754,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "code target";
case RelocInfo::CODE_TARGET_WITH_ID:
return "code target with id";
- case RelocInfo::GLOBAL_PROPERTY_CELL:
- return "global property cell";
+ case RelocInfo::CELL:
+ return "property cell";
case RelocInfo::RUNTIME_ENTRY:
return "runtime entry";
case RelocInfo::JS_RETURN:
@@ -830,7 +830,7 @@ void RelocInfo::Verify() {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
break;
- case GLOBAL_PROPERTY_CELL:
+ case CELL:
Object::VerifyPointer(target_cell());
break;
case DEBUG_BREAK:
@@ -1308,7 +1308,7 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
ExternalReference ExternalReference::re_check_stack_guard_state(
Isolate* isolate) {
Address function;
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
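
RequiresRelocation composes a bit mask from the modes it cares about, now including the renamed CELL mode, and RelocIterator visits only entries whose mode bit is in the mask. The mask arithmetic reduced to its essentials (enum values illustrative, not V8's):

    #include <cstdio>

    enum Mode { EMBEDDED_OBJECT = 0, CELL = 1, RUNTIME_ENTRY = 2 };

    constexpr int ModeMask(Mode mode) { return 1 << mode; }

    int main() {
      int mode_mask = ModeMask(EMBEDDED_OBJECT) | ModeMask(CELL);
      // An iterator keeps an entry with mode m iff (mode_mask >> m) & 1.
      for (int m = EMBEDDED_OBJECT; m <= RUNTIME_ENTRY; m++) {
        std::printf("mode %d: %s\n", m,
                    ((mode_mask >> m) & 1) ? "visited" : "skipped");
      }
    }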
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 2d9e727e57..95853e8e3a 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -254,7 +254,7 @@ class RelocInfo BASE_EMBEDDED {
CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
EMBEDDED_OBJECT,
- GLOBAL_PROPERTY_CELL,
+ CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
@@ -282,7 +282,7 @@ class RelocInfo BASE_EMBEDDED {
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
- LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
+ LAST_GCED_ENUM = CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
@@ -386,9 +386,9 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_target_runtime_entry(Address target,
WriteBarrierMode mode =
UPDATE_WRITE_BARRIER));
- INLINE(JSGlobalPropertyCell* target_cell());
- INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
- INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
+ INLINE(Cell* target_cell());
+ INLINE(Handle<Cell> target_cell_handle());
+ INLINE(void set_target_cell(Cell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
@@ -647,17 +647,35 @@ class ExternalReference BASE_EMBEDDED {
// Handle<Value> f(v8::Arguments&)
DIRECT_API_CALL,
+ // Call to invocation callback via InvokeInvocationCallback.
+ // Handle<Value> f(v8::Arguments&, v8::InvocationCallback)
+ PROFILING_API_CALL,
+
// Direct call to API function callback.
// void f(v8::Arguments&)
DIRECT_API_CALL_NEW,
+ // Call to function callback via InvokeFunctionCallback.
+ // void f(v8::Arguments&, v8::FunctionCallback)
+ PROFILING_API_CALL_NEW,
+
// Direct call to accessor getter callback.
// Handle<value> f(Local<String> property, AccessorInfo& info)
DIRECT_GETTER_CALL,
+ // Call to accessor getter callback via InvokeAccessorGetter.
+ // Handle<value> f(Local<String> property, AccessorInfo& info,
+ // AccessorGetter getter)
+ PROFILING_GETTER_CALL,
+
// Direct call to accessor getter callback.
// void f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL_NEW
+ DIRECT_GETTER_CALL_NEW,
+
+ // Call to accessor getter callback via InvokeAccessorGetterCallback.
+ // void f(Local<String> property, AccessorInfo& info,
+ // AccessorGetterCallback callback)
+ PROFILING_GETTER_CALL_NEW
};
static void SetUp();
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index e2ec542a77..13adbd0f9c 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -79,7 +79,11 @@ class PerThreadAssertScopeBase {
protected:
PerThreadAssertScopeBase() {
- data_ = AssertData();
+ data_ = GetAssertData();
+ if (data_ == NULL) {
+ data_ = new PerThreadAssertData();
+ SetThreadLocalData(data_);
+ }
data_->increment_level();
}
@@ -89,22 +93,22 @@ class PerThreadAssertScopeBase {
ASSERT(data_->get(static_cast<PerThreadAssertType>(i)));
}
delete data_;
- Thread::SetThreadLocal(thread_local_key, NULL);
+ SetThreadLocalData(NULL);
}
- static PerThreadAssertData* AssertData() {
- PerThreadAssertData* data = reinterpret_cast<PerThreadAssertData*>(
- Thread::GetThreadLocal(thread_local_key));
- if (data == NULL) {
- data = new PerThreadAssertData();
- Thread::SetThreadLocal(thread_local_key, data);
- }
- return data;
+ static PerThreadAssertData* GetAssertData() {
+ return reinterpret_cast<PerThreadAssertData*>(
+ Thread::GetThreadLocal(thread_local_key));
}
static Thread::LocalStorageKey thread_local_key;
PerThreadAssertData* data_;
friend class Isolate;
+
+ private:
+ static void SetThreadLocalData(PerThreadAssertData* data) {
+ Thread::SetThreadLocal(thread_local_key, data);
+ }
#endif // DEBUG
};
@@ -124,7 +128,10 @@ class PerThreadAssertScope : public PerThreadAssertScopeBase {
~PerThreadAssertScope() { data_->set(type, old_state_); }
- static bool IsAllowed() { return AssertData()->get(type); }
+ static bool IsAllowed() {
+ PerThreadAssertData* data = GetAssertData();
+ return data == NULL || data->get(type);
+ }
private:
bool old_state_;
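
The assert-scope change makes the per-thread record lazy: it is created only when a scope is actually entered, and IsAllowed() now treats a missing record as "allowed" (data == NULL || data->get(type)). The behaviour, modeled with C++ thread_local standing in for V8's Thread::GetThreadLocal:

    #include <cassert>

    struct PerThreadAssertData { bool allowed = true; };

    static thread_local PerThreadAssertData* data = nullptr;

    struct PerThreadAssertScope {
      explicit PerThreadAssertScope(bool allow) {
        if (data == nullptr) data = new PerThreadAssertData();  // lazy init
        old_state_ = data->allowed;
        data->allowed = allow;
      }
      ~PerThreadAssertScope() { data->allowed = old_state_; }
      // No record on this thread means no scope ever restricted anything.
      static bool IsAllowed() { return data == nullptr || data->allowed; }
      bool old_state_;
    };

    int main() {
      assert(PerThreadAssertScope::IsAllowed());  // true before any scope
      {
        PerThreadAssertScope scope(false);
        assert(!PerThreadAssertScope::IsAllowed());
      }
      assert(PerThreadAssertScope::IsAllowed());  // restored on exit
    }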
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index a5d1e2df85..589bd5a48f 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -57,22 +57,22 @@ AST_NODE_LIST(DECL_ACCEPT)
bool Expression::IsSmiLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsSmi();
}
bool Expression::IsStringLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsString();
}
bool Expression::IsNullLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsNull();
}
bool Expression::IsUndefinedLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsUndefined();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsUndefined();
}
@@ -135,6 +135,7 @@ Assignment::Assignment(Isolate* isolate,
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
is_monomorphic_(false),
+ is_uninitialized_(false),
store_mode_(STANDARD_STORE) { }
@@ -188,7 +189,7 @@ ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
emit_store_ = true;
key_ = key;
value_ = value;
- Object* k = *key->handle();
+ Object* k = *key->value();
if (k->IsInternalizedString() &&
isolate->heap()->proto_string()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
@@ -262,7 +263,7 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
Literal* literal = property->key();
- if (literal->handle()->IsNull()) continue;
+ if (literal->value()->IsNull()) continue;
uint32_t hash = literal->Hash();
// If the key of a computed property is in the table, do not emit
// a store for the property later.
@@ -287,6 +288,16 @@ void TargetCollector::AddTarget(Label* target, Zone* zone) {
}
+void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+ // TODO(olivf) If this Operation is used in a test context, then the
+ // expression has a ToBoolean stub and we want to collect the type
+ // information. However the GraphBuilder expects it to be on the instruction
+ // corresponding to the TestContext, therefore we have to store it here and
+ // not on the operand.
+ set_to_boolean_types(oracle->ToBooleanTypes(expression()->test_id()));
+}
+
+
bool UnaryOperation::ResultOverwriteAllowed() {
switch (op_) {
case Token::BIT_NOT:
@@ -298,6 +309,16 @@ bool UnaryOperation::ResultOverwriteAllowed() {
}
+void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+ // TODO(olivf) If this Operation is used in a test context, then the right
+ // hand side has a ToBoolean stub and we want to collect the type information.
+ // However the GraphBuilder expects it to be on the instruction corresponding
+ // to the TestContext, therefore we have to store it here and not on the
+ // right hand operand.
+ set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
+}
+
+
bool BinaryOperation::ResultOverwriteAllowed() {
switch (op_) {
case Token::COMMA:
@@ -337,7 +358,7 @@ static bool MatchLiteralCompareTypeof(Expression* left,
Handle<String>* check) {
if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
*expr = left->AsUnaryOperation()->expression();
- *check = Handle<String>::cast(right->AsLiteral()->handle());
+ *check = Handle<String>::cast(right->AsLiteral()->value());
return true;
}
return false;
@@ -417,6 +438,10 @@ bool FunctionDeclaration::IsInlineable() const {
// ----------------------------------------------------------------------------
// Recording of type feedback
+// TODO(rossberg): all RecordTypeFeedback functions should disappear
+// once we use the common type field in the AST consistently.
+
+
void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
}
@@ -444,8 +469,8 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
oracle->LoadReceiverTypes(this, name, &receiver_types_);
}
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
@@ -465,12 +490,14 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
TypeFeedbackId id = AssignmentFeedbackId();
+ is_uninitialized_ = oracle->StoreIsUninitialized(id);
+ if (is_uninitialized_) return;
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
@@ -503,19 +530,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->SwitchType(this);
- if (info.IsUninitialized()) info = TypeInfo::Unknown();
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsInternalizedString()) {
- compare_type_ = NAME_ONLY;
- } else if (info.IsNonInternalizedString()) {
- compare_type_ = STRING_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
+ compare_type_ = oracle->ClauseType(CompareId());
}
@@ -570,11 +585,11 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
LookupResult* lookup) {
target_ = Handle<JSFunction>::null();
- cell_ = Handle<JSGlobalPropertyCell>::null();
+ cell_ = Handle<Cell>::null();
ASSERT(lookup->IsFound() &&
lookup->type() == NORMAL &&
lookup->holder() == *global);
- cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
+ cell_ = Handle<Cell>(global->GetPropertyCell(lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
@@ -624,8 +639,8 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else {
// Method call. Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(key->handle());
+ ASSERT(key != NULL && key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(key->value());
receiver_types_.Clear();
oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
#ifdef DEBUG
@@ -674,31 +689,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
-void UnaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- type_ = oracle->UnaryType(this);
-}
-
-
-void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- oracle->BinaryType(this, &left_type_, &right_type_, &result_type_,
- &has_fixed_right_arg_, &fixed_right_arg_value_);
-}
-
-
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- oracle->CompareType(this, &left_type_, &right_type_, &overall_type_);
- if (!overall_type_.IsUninitialized() && overall_type_.IsNonPrimitive() &&
- (op_ == Token::EQ || op_ == Token::EQ_STRICT)) {
- map_ = oracle->GetCompareMap(this);
- } else {
- // May be a compare to nil.
- map_ = oracle->CompareNilMonomorphicReceiverType(this);
- if (op_ != Token::EQ_STRICT)
- compare_nil_types_ = oracle->CompareNilTypes(this);
- }
-}
-
-
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@@ -1072,7 +1062,7 @@ CaseClause::CaseClause(Isolate* isolate,
: label_(label),
statements_(statements),
position_(pos),
- compare_type_(NONE),
+ compare_type_(Type::None(), isolate),
compare_id_(AstNode::GetNextId(isolate)),
entry_id_(AstNode::GetNextId(isolate)) {
}
@@ -1182,18 +1172,18 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
Handle<String> Literal::ToString() {
- if (handle_->IsString()) return Handle<String>::cast(handle_);
+ if (value_->IsString()) return Handle<String>::cast(value_);
Factory* factory = Isolate::Current()->factory();
- ASSERT(handle_->IsNumber());
+ ASSERT(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str;
- if (handle_->IsSmi()) {
+ if (value_->IsSmi()) {
// Optimization only, the heap number case would subsume this.
- OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
+ OS::SNPrintF(buffer, "%d", Smi::cast(*value_)->value());
str = arr;
} else {
- str = DoubleToCString(handle_->Number(), buffer);
+ str = DoubleToCString(value_->Number(), buffer);
}
return factory->NewStringFromAscii(CStrVector(str));
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 219a69bc8e..b9a98e0b7d 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -356,8 +356,11 @@ class Expression: public AstNode {
// True iff the expression is the undefined literal.
bool IsUndefinedLiteral();
- // Expression type
- Handle<Type> type() { return type_; }
+ // Expression type bounds
+ Handle<Type> upper_type() { return upper_type_; }
+ Handle<Type> lower_type() { return lower_type_; }
+ void set_upper_type(Handle<Type> type) { upper_type_ = type; }
+ void set_lower_type(Handle<Type> type) { lower_type_ = type; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
@@ -380,7 +383,7 @@ class Expression: public AstNode {
}
// TODO(rossberg): this should move to its own AST node eventually.
- void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
byte to_boolean_types() const { return to_boolean_types_; }
BailoutId id() const { return id_; }
@@ -388,12 +391,15 @@ class Expression: public AstNode {
protected:
explicit Expression(Isolate* isolate)
- : type_(Type::Any(), isolate),
+ : upper_type_(Type::Any(), isolate),
+ lower_type_(Type::None(), isolate),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
+ void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
private:
- Handle<Type> type_;
+ Handle<Type> upper_type_;
+ Handle<Type> lower_type_;
byte to_boolean_types_;
const BailoutId id_;
@@ -1106,24 +1112,15 @@ class CaseClause: public ZoneObject {
// Type feedback information.
TypeFeedbackId CompareId() { return compare_id_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsNameCompare() { return compare_type_ == NAME_ONLY; }
- bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+ Handle<Type> compare_type() { return compare_type_; }
private:
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
int position_;
- enum CompareTypeFeedback {
- NONE,
- SMI_ONLY,
- NAME_ONLY,
- STRING_ONLY,
- OBJECT_ONLY
- };
- CompareTypeFeedback compare_type_;
+ Handle<Type> compare_type_;
+
const TypeFeedbackId compare_id_;
const BailoutId entry_id_;
};
@@ -1316,36 +1313,36 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
virtual bool IsPropertyName() {
- if (handle_->IsInternalizedString()) {
+ if (value_->IsInternalizedString()) {
uint32_t ignored;
- return !String::cast(*handle_)->AsArrayIndex(&ignored);
+ return !String::cast(*value_)->AsArrayIndex(&ignored);
}
return false;
}
Handle<String> AsPropertyName() {
ASSERT(IsPropertyName());
- return Handle<String>::cast(handle_);
+ return Handle<String>::cast(value_);
}
- virtual bool ToBooleanIsTrue() { return handle_->BooleanValue(); }
- virtual bool ToBooleanIsFalse() { return !handle_->BooleanValue(); }
+ virtual bool ToBooleanIsTrue() { return value_->BooleanValue(); }
+ virtual bool ToBooleanIsFalse() { return !value_->BooleanValue(); }
// Identity testers.
bool IsNull() const {
- ASSERT(!handle_.is_null());
- return handle_->IsNull();
+ ASSERT(!value_.is_null());
+ return value_->IsNull();
}
bool IsTrue() const {
- ASSERT(!handle_.is_null());
- return handle_->IsTrue();
+ ASSERT(!value_.is_null());
+ return value_->IsTrue();
}
bool IsFalse() const {
- ASSERT(!handle_.is_null());
- return handle_->IsFalse();
+ ASSERT(!value_.is_null());
+ return value_->IsFalse();
}
- Handle<Object> handle() const { return handle_; }
+ Handle<Object> value() const { return value_; }
// Support for using Literal as a HashMap key. NOTE: Currently, this works
// only for string and number literals!
@@ -1360,14 +1357,14 @@ class Literal: public Expression {
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(Isolate* isolate, Handle<Object> handle)
+ Literal(Isolate* isolate, Handle<Object> value)
: Expression(isolate),
- handle_(handle) { }
+ value_(value) { }
private:
Handle<String> ToString();
- Handle<Object> handle_;
+ Handle<Object> value_;
};
@@ -1705,7 +1702,7 @@ class Call: public Expression {
// as the holder!
Handle<JSObject> holder() { return holder_; }
- Handle<JSGlobalPropertyCell> cell() { return cell_; }
+ Handle<Cell> cell() { return cell_; }
bool ComputeTarget(Handle<Map> type, Handle<String> name);
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
@@ -1745,7 +1742,7 @@ class Call: public Expression {
SmallMapList receiver_types_;
Handle<JSFunction> target_;
Handle<JSObject> holder_;
- Handle<JSGlobalPropertyCell> cell_;
+ Handle<Cell> cell_;
const BailoutId return_id_;
};
@@ -1765,7 +1762,7 @@ class CallNew: public Expression {
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
- Handle<JSGlobalPropertyCell> allocation_info_cell() const {
+ Handle<Cell> allocation_info_cell() const {
return allocation_info_cell_;
}
@@ -1792,7 +1789,7 @@ class CallNew: public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
ElementsKind elements_kind_;
- Handle<JSGlobalPropertyCell> allocation_info_cell_;
+ Handle<Cell> allocation_info_cell_;
const BailoutId return_id_;
};
@@ -1844,8 +1841,8 @@ class UnaryOperation: public Expression {
BailoutId MaterializeFalseId() { return materialize_false_id_; }
TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- TypeInfo type() const { return type_; }
+
+ virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
protected:
UnaryOperation(Isolate* isolate,
@@ -1866,8 +1863,6 @@ class UnaryOperation: public Expression {
Expression* expression_;
int pos_;
- TypeInfo type_;
-
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
const BailoutId materialize_true_id_;
@@ -1889,12 +1884,13 @@ class BinaryOperation: public Expression {
BailoutId RightId() const { return right_id_; }
TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- TypeInfo left_type() const { return left_type_; }
- TypeInfo right_type() const { return right_type_; }
- TypeInfo result_type() const { return result_type_; }
- bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
- int fixed_right_arg_value() const { return fixed_right_arg_value_; }
+ // TODO(rossberg): result_type should be subsumed by lower_type.
+ Handle<Type> result_type() const { return result_type_; }
+ void set_result_type(Handle<Type> type) { result_type_ = type; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+ void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
+
+ virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
protected:
BinaryOperation(Isolate* isolate,
@@ -1917,11 +1913,10 @@ class BinaryOperation: public Expression {
Expression* right_;
int pos_;
- TypeInfo left_type_;
- TypeInfo right_type_;
- TypeInfo result_type_;
- bool has_fixed_right_arg_;
- int fixed_right_arg_value_;
+ Handle<Type> result_type_;
+ // TODO(rossberg): the fixed arg should probably be represented as a Constant
+ // type for the RHS.
+ Maybe<int> fixed_right_arg_;
// The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
@@ -2002,12 +1997,8 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- TypeInfo left_type() const { return left_type_; }
- TypeInfo right_type() const { return right_type_; }
- TypeInfo overall_type() const { return overall_type_; }
- byte compare_nil_types() const { return compare_nil_types_; }
- Handle<Map> map() const { return map_; }
+ Handle<Type> combined_type() const { return combined_type_; }
+ void set_combined_type(Handle<Type> type) { combined_type_ = type; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2034,11 +2025,7 @@ class CompareOperation: public Expression {
Expression* right_;
int pos_;
- TypeInfo left_type_;
- TypeInfo right_type_;
- TypeInfo overall_type_;
- byte compare_nil_types_;
- Handle<Map> map_;
+ Handle<Type> combined_type_;
};
@@ -2106,6 +2093,7 @@ class Assignment: public Expression {
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
+ bool IsUninitialized() { return is_uninitialized_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
virtual KeyedAccessStoreMode GetStoreMode() {
return store_mode_;
@@ -2136,6 +2124,7 @@ class Assignment: public Expression {
const BailoutId assignment_id_;
bool is_monomorphic_ : 1;
+ bool is_uninitialized_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
SmallMapList receiver_types_;
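
BinaryOperation now stores the fixed right operand as a single Maybe<int> instead of the bool/int pair it replaces. A minimal Maybe in the same spirit (for illustration only; V8 ships its own template):

    #include <cstdio>

    template <class T>
    struct Maybe {
      Maybe() : has_value(false), value() {}
      explicit Maybe(T t) : has_value(true), value(t) {}
      bool has_value;
      T value;
    };

    int main() {
      Maybe<int> fixed_right_arg;        // no type feedback yet
      fixed_right_arg = Maybe<int>(4);   // e.g. `x % 4` observed at runtime
      if (fixed_right_arg.has_value)
        std::printf("specialize on %d\n", fixed_right_arg.value);
    }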
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index ebca91d27d..b18b54d77b 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -154,17 +154,17 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#if defined(THREAD_SANITIZER)
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+ (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+ (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+ (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h
index 6559336ad9..e52c26c2fe 100644
--- a/deps/v8/src/atomicops_internals_tsan.h
+++ b/deps/v8/src/atomicops_internals_tsan.h
@@ -62,97 +62,162 @@ typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
typedef enum {
- __tsan_memory_order_relaxed = (1 << 0) + 100500,
- __tsan_memory_order_consume = (1 << 1) + 100500,
- __tsan_memory_order_acquire = (1 << 2) + 100500,
- __tsan_memory_order_release = (1 << 3) + 100500,
- __tsan_memory_order_acq_rel = (1 << 4) + 100500,
- __tsan_memory_order_seq_cst = (1 << 5) + 100500,
+ __tsan_memory_order_relaxed,
+ __tsan_memory_order_consume,
+ __tsan_memory_order_acquire,
+ __tsan_memory_order_release,
+ __tsan_memory_order_acq_rel,
+ __tsan_memory_order_seq_cst,
} __tsan_memory_order;
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
__tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
__tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
__tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
__tsan_memory_order mo);
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
__tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
__tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
__tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
__tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic128 v, __tsan_memory_order mo);
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
@@ -160,166 +225,166 @@ void __tsan_atomic_thread_fence(__tsan_memory_order mo);
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed);
+ __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_relaxed);
+ __tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_acquire);
+ __tsan_memory_order_acquire);
}
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_release);
+ __tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
+ __tsan_memory_order_relaxed);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
+ __tsan_memory_order_acq_rel);
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire);
+ __tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release);
+ __tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed);
+ __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
+ __tsan_memory_order_relaxed);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
+ __tsan_memory_order_acq_rel);
}
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire);
+ __tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release);
+ __tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
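
The substantive interface change in this file is the extra fail_mo argument: the __tsan compare-exchange hooks now take separate success and failure memory orders, matching the C++11 atomics API. Here is a sketch of the Release_CompareAndSwap wrapper above expressed with std::atomic, which has the same two-order signature (release on success, relaxed on failure); Atomic32 is a local alias, not V8's typedef.

#include <atomic>
#include <cassert>
#include <cstdint>

using Atomic32 = int32_t;

Atomic32 Release_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value) {
  Atomic32 cmp = old_value;
  ptr->compare_exchange_strong(cmp, new_value,
                               std::memory_order_release,   // success order
                               std::memory_order_relaxed);  // failure order
  return cmp;  // observed value; equals old_value iff the swap happened
}

int main() {
  std::atomic<Atomic32> cell(5);
  assert(Release_CompareAndSwap(&cell, 5, 7) == 5);  // swap succeeded
  assert(Release_CompareAndSwap(&cell, 5, 9) == 7);  // swap failed, saw 7
  return 0;
}
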
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index a51a9b117e..49333eb21c 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -45,6 +45,10 @@
#include "extensions/statistics-extension.h"
#include "code-stubs.h"
+#if defined(V8_I18N_SUPPORT)
+#include "extensions/i18n/i18n-extension.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -102,6 +106,9 @@ void Bootstrapper::InitializeOncePerProcess() {
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
+#if defined(V8_I18N_SUPPORT)
+ v8_i18n::Extension::Register();
+#endif
}
@@ -538,31 +545,33 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
if (prototypeMode != DONT_ADD_PROTOTYPE) {
prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
}
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
map->set_instance_descriptors(*descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
+ CallbacksDescriptor d(*factory()->length_string(), *length, ro_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add name.
- CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *name, rw_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_string(), *arguments, attribs);
+ CallbacksDescriptor d(*factory()->arguments_string(), *arguments,
+ rw_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
+ CallbacksDescriptor d(*factory()->caller_string(), *caller, rw_attribs);
map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
- if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
- attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
- }
+ PropertyAttributes attribs =
+ prototypeMode == ADD_WRITEABLE_PROTOTYPE ? rw_attribs : ro_attribs;
CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
map->AppendDescriptor(&d, witness);
}
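
The hunk above splits the single attribs into rw_attribs and ro_attribs so that length (and prototype, in the non-writable case) becomes READ_ONLY while name, arguments, and caller stay writable. A compilable sketch of that flag composition, using the standard V8 PropertyAttribute bit values (READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4); the add_writable_prototype bool stands in for the prototypeMode switch.

#include <iostream>

enum PropertyAttributes {
  NONE        = 0,
  READ_ONLY   = 1 << 0,
  DONT_ENUM   = 1 << 1,
  DONT_DELETE = 1 << 2
};

int main() {
  PropertyAttributes rw_attribs =
      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
  PropertyAttributes ro_attribs =
      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
  bool add_writable_prototype = false;  // ADD_WRITEABLE_PROTOTYPE or not
  PropertyAttributes attribs =
      add_writable_prototype ? rw_attribs : ro_attribs;
  std::cout << "prototype attribs: " << attribs << "\n";  // prints 7
  return 0;
}
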
@@ -857,12 +866,18 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
isolate->initial_object_prototype(),
Builtins::kArrayCode, true, true);
array_function->shared()->DontAdaptArguments();
+ array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
Handle<Map> initial_map(array_function->initial_map());
+
+ // This assert protects an optimization in
+ // HGraphBuilder::JSArrayBuilder::EmitMapCode()
+ ASSERT(initial_map->elements_kind() == GetInitialFastElementsKind());
+
Handle<DescriptorArray> array_descriptors(
factory->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*array_descriptors);
@@ -883,16 +898,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// overwritten by JS code.
native_context()->set_array_function(*array_function);
- if (FLAG_optimize_constructed_arrays) {
- // Cache the array maps, needed by ArrayConstructorStub
- CacheInitialJSArrayMaps(native_context(), initial_map);
- ArrayConstructorStub array_constructor_stub(isolate);
- Handle<Code> code = array_constructor_stub.GetCode(isolate);
- array_function->shared()->set_construct_stub(*code);
- } else {
- array_function->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kCommonArrayConstructCode));
- }
+ // Cache the array maps, needed by ArrayConstructorStub
+ CacheInitialJSArrayMaps(native_context(), initial_map);
+ ArrayConstructorStub array_constructor_stub(isolate);
+ Handle<Code> code = array_constructor_stub.GetCode(isolate);
+ array_function->shared()->set_construct_stub(*code);
}
{ // --- N u m b e r ---
@@ -1359,6 +1369,14 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
EXTERNAL_PIXEL_ELEMENTS);
native_context()->set_uint8c_array_fun(*uint8c_fun);
+
+ Handle<JSFunction> data_view_fun =
+ InstallFunction(
+ global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_data_view_fun(*data_view_fun);
}
if (FLAG_harmony_generators) {
@@ -1612,15 +1630,9 @@ Handle<JSFunction> Genesis::InstallInternalArray(
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
- if (FLAG_optimize_constructed_arrays) {
- InternalArrayConstructorStub internal_array_constructor_stub(isolate());
- Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
- array_function->shared()->set_construct_stub(*code);
- } else {
- array_function->shared()->set_construct_stub(
- isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
- }
-
+ InternalArrayConstructorStub internal_array_constructor_stub(isolate());
+ Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
+ array_function->shared()->set_construct_stub(*code);
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());
@@ -2274,6 +2286,12 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
InstallExtension(isolate, "v8/statistics", &extension_states);
}
+#if defined(V8_I18N_SUPPORT)
+ if (FLAG_enable_i18n) {
+ InstallExtension(isolate, "v8/i18n", &extension_states);
+ }
+#endif
+
if (extensions == NULL) return true;
// Install required extensions
int count = v8::ImplementationUtilities::GetNameCount(extensions);
@@ -2499,8 +2517,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Name> key = Handle<Name>(Name::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i),
isolate());
- if (value->IsJSGlobalPropertyCell()) {
- value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value(),
+ ASSERT(!value->IsCell());
+ if (value->IsPropertyCell()) {
+ value = Handle<Object>(PropertyCell::cast(*value)->value(),
isolate());
}
PropertyDetails details = properties->DetailsAt(i);
@@ -2574,7 +2593,14 @@ Genesis::Genesis(Isolate* isolate,
StackLimitCheck check(isolate);
if (check.HasOverflowed()) return;
- native_context_ = Snapshot::NewContextFromSnapshot();
+ // We can only de-serialize a context if the isolate was initialized from
+ // a snapshot. Otherwise we have to build the context from scratch.
+ if (isolate->initialized_from_snapshot()) {
+ native_context_ = Snapshot::NewContextFromSnapshot();
+ } else {
+ native_context_ = Handle<Context>();
+ }
+
if (!native_context().is_null()) {
AddToWeakNativeContextList(*native_context());
isolate->set_context(*native_context());
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index d97a4778af..be04ddf53d 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "builtins.h"
+#include "cpu-profiler.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "heap-profiler.h"
@@ -209,23 +210,21 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
MaybeObject* maybe_array = array->Initialize(0);
if (maybe_array->IsFailure()) return maybe_array;
- if (FLAG_optimize_constructed_arrays) {
- AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
- ElementsKind to_kind = array->GetElementsKind();
- if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
- if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
- to_kind)) {
- // We have advice that we should change the elements kind
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
- reinterpret_cast<void*>(array),
- ElementsKindToString(array->GetElementsKind()),
- ElementsKindToString(to_kind));
- }
-
- maybe_array = array->TransitionElementsKind(to_kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
+ ElementsKind to_kind = array->GetElementsKind();
+ if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
+ if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
+ to_kind)) {
+ // We have advice that we should change the elements kind
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
+ reinterpret_cast<void*>(array),
+ ElementsKindToString(array->GetElementsKind()),
+ ElementsKindToString(to_kind));
}
+
+ maybe_array = array->TransitionElementsKind(to_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
}
}
@@ -1267,14 +1266,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
args.length() - 1,
is_construct);
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = custom.Call(callback);
- }
+ v8::Handle<v8::Value> value = custom.Call(callback);
if (value.IsEmpty()) {
result = heap->undefined_value();
} else {
@@ -1343,14 +1335,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
&args[0] - 1,
args.length() - 1,
is_construct_call);
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = custom.Call(callback);
- }
+ v8::Handle<v8::Value> value = custom.Call(callback);
if (value.IsEmpty()) {
result = heap->undefined_value();
} else {
@@ -1496,12 +1481,12 @@ static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
+ StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
}
static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kStrictMode);
+ StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
}
@@ -1510,6 +1495,16 @@ static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
}
+static void Generate_StoreIC_Generic(MacroAssembler* masm) {
+ StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
+}
+
+
+static void Generate_StoreIC_Generic_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
+}
+
+
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index c45fbfd335..edf650df2e 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -166,6 +166,10 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
+ V(StoreIC_Generic, STORE_IC, GENERIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Generic_Strict, STORE_IC, GENERIC, \
+ kStrictMode) \
V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
@@ -205,8 +209,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(CommonArrayConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -395,7 +397,6 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
- static void Generate_CommonArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 99c4db55b7..96266af119 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -106,7 +106,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
};
HValue* BuildArrayConstructor(ElementsKind kind,
- bool disable_allocation_sites,
+ ContextCheckMode context_mode,
+ AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class);
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
@@ -144,7 +145,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
set_current_block(next_block);
HConstant* undefined_constant = new(zone) HConstant(
- isolate()->factory()->undefined_value(), Representation::Tagged());
+ isolate()->factory()->undefined_value());
AddInstruction(undefined_constant);
graph()->set_undefined_constant(undefined_constant);
@@ -196,8 +197,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
stack_pop_count->ClearFlag(HValue::kCanOverflow);
} else {
int count = descriptor_->hint_stack_parameter_count_;
- stack_pop_count = AddInstruction(new(zone)
- HConstant(count, Representation::Integer32()));
+ stack_pop_count = AddInstruction(new(zone) HConstant(count));
}
}
@@ -391,13 +391,11 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* boilerplate_size =
AddInstruction(new(zone) HInstanceSize(boilerplate));
HValue* size_in_words =
- AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2,
- Representation::Integer32()));
+ AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2));
checker.IfCompare(boilerplate_size, size_in_words, Token::EQ);
checker.Then();
- HValue* size_in_bytes =
- AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
+ HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size));
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
if (isolate()->heap()->ShouldGloballyPretenure()) {
flags = static_cast<HAllocate::Flags>(
@@ -512,8 +510,7 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
HInstruction* elements = AddLoadElements(js_array);
- HInstruction* elements_length =
- AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ HInstruction* elements_length = AddLoadFixedArrayLength(elements);
HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
context(), to_kind, elements_length);
@@ -537,15 +534,19 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
}
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
- ElementsKind kind, bool disable_allocation_sites,
+ ElementsKind kind,
+ ContextCheckMode context_mode,
+ AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class) {
HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
- HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
- HInstruction* array_function = BuildGetArrayFunction(context());
+ if (context_mode == CONTEXT_CHECK_REQUIRED) {
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker checker(this, constructor, array_function);
+ }
- ArrayContextChecker(this, constructor, array_function);
- JSArrayBuilder array_builder(this, kind, property_cell,
- disable_allocation_sites);
+ HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
+ JSArrayBuilder array_builder(this, kind, property_cell, constructor,
+ override_mode);
HValue* result = NULL;
switch (argument_class) {
case NONE:
@@ -558,6 +559,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
result = BuildArrayNArgumentsConstructor(&array_builder, kind);
break;
}
+
return result;
}
@@ -602,7 +604,7 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HConstant* initial_capacity_node = new(zone()) HConstant(initial_capacity);
AddInstruction(initial_capacity_node);
- HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length);
+ HBoundsCheck* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
IfBuilder if_builder(this);
if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
if_builder.Then();
@@ -655,8 +657,9 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
template <>
HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
- return BuildArrayConstructor(kind, disable_allocation_sites, NONE);
+ ContextCheckMode context_mode = casted_stub()->context_mode();
+ AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+ return BuildArrayConstructor(kind, context_mode, override_mode, NONE);
}
@@ -669,8 +672,9 @@ template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
- return BuildArrayConstructor(kind, disable_allocation_sites, SINGLE);
+ ContextCheckMode context_mode = casted_stub()->context_mode();
+ AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+ return BuildArrayConstructor(kind, context_mode, override_mode, SINGLE);
}
@@ -682,8 +686,9 @@ Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
- return BuildArrayConstructor(kind, disable_allocation_sites, MULTIPLE);
+ ContextCheckMode context_mode = casted_stub()->context_mode();
+ AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+ return BuildArrayConstructor(kind, context_mode, override_mode, MULTIPLE);
}
@@ -733,12 +738,13 @@ Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
+ Isolate* isolate = graph()->isolate();
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
- Handle<Map> sentinel_map(graph()->isolate()->heap()->meta_map());
- BuildCompareNil(GetParameter(0),
- stub->GetTypes(), sentinel_map,
- RelocInfo::kNoPosition, &continuation);
+ Handle<Map> sentinel_map(isolate->heap()->meta_map());
+ Handle<Type> type =
+ CompareNilICStub::StateToType(isolate, stub->GetState(), sentinel_map);
+ BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 6b6e25019d..2ed2ba3c66 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "cpu-profiler.h"
#include "stub-cache.h"
#include "factory.h"
#include "gdb-jit.h"
@@ -431,24 +432,24 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
void CompareNilICStub::Record(Handle<Object> object) {
- ASSERT(types_ != Types::FullCompare());
+ ASSERT(state_ != State::Generic());
if (object->IsNull()) {
- types_.Add(NULL_TYPE);
+ state_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
- types_.Add(UNDEFINED);
+ state_.Add(UNDEFINED);
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
- types_ = Types::FullCompare();
+ state_ = State::Generic();
} else if (IsMonomorphic()) {
- types_ = Types::FullCompare();
+ state_ = State::Generic();
} else {
- types_.Add(MONOMORPHIC_MAP);
+ state_.Add(MONOMORPHIC_MAP);
}
}
-void CompareNilICStub::Types::TraceTransition(Types to) const {
+void CompareNilICStub::State::TraceTransition(State to) const {
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
@@ -467,13 +468,13 @@ void CompareNilICStub::Types::TraceTransition(Types to) const {
void CompareNilICStub::PrintName(StringStream* stream) {
stream->Add("CompareNilICStub_");
- types_.Print(stream);
+ state_.Print(stream);
stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
"(UndefinedValue|");
}
-void CompareNilICStub::Types::Print(StringStream* stream) const {
+void CompareNilICStub::State::Print(StringStream* stream) const {
stream->Add("(");
SimpleListPrinter printer(stream);
if (IsEmpty()) printer.Add("None");
@@ -481,10 +482,40 @@ void CompareNilICStub::Types::Print(StringStream* stream) const {
if (Contains(NULL_TYPE)) printer.Add("Null");
if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
if (Contains(UNDETECTABLE)) printer.Add("Undetectable");
+ if (Contains(GENERIC)) printer.Add("Generic");
stream->Add(")");
}
+Handle<Type> CompareNilICStub::StateToType(
+ Isolate* isolate,
+ State state,
+ Handle<Map> map) {
+ if (state.Contains(CompareNilICStub::GENERIC)) {
+ return handle(Type::Any(), isolate);
+ }
+
+ Handle<Type> result(Type::None(), isolate);
+ if (state.Contains(CompareNilICStub::UNDEFINED)) {
+ result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
+ isolate);
+ }
+ if (state.Contains(CompareNilICStub::NULL_TYPE)) {
+ result = handle(Type::Union(result, handle(Type::Null(), isolate)),
+ isolate);
+ }
+ if (state.Contains(CompareNilICStub::UNDETECTABLE)) {
+ result = handle(Type::Union(result, handle(Type::Undetectable(), isolate)),
+ isolate);
+ } else if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
+ Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
+ result = handle(Type::Union(result, handle(type, isolate)), isolate);
+ }
+
+ return result;
+}
+
+
void InstanceofStub::PrintName(StringStream* stream) {
const char* args = "";
if (HasArgsInRegisters()) {
@@ -727,24 +758,11 @@ void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
}
-FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
-
-
void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer) {
- if (entry_hook_ != NULL)
- entry_hook_(function, stack_pointer);
-}
-
-
-bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- // We don't allow setting a new entry hook over one that's
- // already active, as the hooks won't stack.
- if (entry_hook != 0 && entry_hook_ != 0)
- return false;
-
- entry_hook_ = entry_hook;
- return true;
+ FunctionEntryHook entry_hook = Isolate::Current()->function_entry_hook();
+ ASSERT(entry_hook != NULL);
+ entry_hook(function, stack_pointer);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 0ea7ac96b5..d197c841b1 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -95,7 +95,7 @@ namespace internal {
V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
@@ -107,7 +107,7 @@ namespace internal {
#endif
// List of code stubs only used on MIPS platforms.
-#ifdef V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
V(DirectCEntry)
@@ -904,8 +904,7 @@ class BinaryOpStub: public PlatformCodeStub {
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
- has_fixed_right_arg_(false),
- encoded_right_arg_(encode_arg_value(1)) {
+ encoded_right_arg_(false, encode_arg_value(1)) {
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -915,16 +914,15 @@ class BinaryOpStub: public PlatformCodeStub {
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
BinaryOpIC::TypeInfo result_type,
- bool has_fixed_right_arg,
- int32_t fixed_right_arg_value)
+ Maybe<int32_t> fixed_right_arg)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
result_type_(result_type),
- has_fixed_right_arg_(has_fixed_right_arg),
- encoded_right_arg_(encode_arg_value(fixed_right_arg_value)) { }
+ encoded_right_arg_(fixed_right_arg.has_value,
+ encode_arg_value(fixed_right_arg.value)) { }
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
@@ -942,16 +940,14 @@ class BinaryOpStub: public PlatformCodeStub {
return static_cast<Token::Value>(OpBits::decode(minor_key));
}
- static bool decode_has_fixed_right_arg_from_minor_key(int minor_key) {
- return HasFixedRightArgBits::decode(minor_key);
- }
-
- static int decode_fixed_right_arg_value_from_minor_key(int minor_key) {
- return decode_arg_value(FixedRightArgValueBits::decode(minor_key));
+ static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
+ return Maybe<int>(
+ HasFixedRightArgBits::decode(minor_key),
+ decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
}
int fixed_right_arg_value() const {
- return decode_arg_value(encoded_right_arg_);
+ return decode_arg_value(encoded_right_arg_.value);
}
static bool can_encode_arg_value(int32_t value) {
@@ -975,8 +971,7 @@ class BinaryOpStub: public PlatformCodeStub {
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
- bool has_fixed_right_arg_;
- int encoded_right_arg_;
+ Maybe<int> encoded_right_arg_;
static int encode_arg_value(int32_t value) {
ASSERT(can_encode_arg_value(value));
@@ -1009,8 +1004,8 @@ class BinaryOpStub: public PlatformCodeStub {
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
| ResultTypeBits::encode(result_type_)
- | HasFixedRightArgBits::encode(has_fixed_right_arg_)
- | FixedRightArgValueBits::encode(encoded_right_arg_);
+ | HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
+ | FixedRightArgValueBits::encode(encoded_right_arg_.value);
}
@@ -1124,46 +1119,50 @@ class ICCompareStub: public PlatformCodeStub {
class CompareNilICStub : public HydrogenCodeStub {
public:
- enum Type {
+ enum CompareNilType {
UNDEFINED,
NULL_TYPE,
MONOMORPHIC_MAP,
UNDETECTABLE,
+ GENERIC,
NUMBER_OF_TYPES
};
- class Types : public EnumSet<Type, byte> {
+ class State : public EnumSet<CompareNilType, byte> {
public:
- Types() : EnumSet<Type, byte>(0) { }
- explicit Types(byte bits) : EnumSet<Type, byte>(bits) { }
+ State() : EnumSet<CompareNilType, byte>(0) { }
+ explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
- static Types FullCompare() {
- Types set;
+ static State Generic() {
+ State set;
set.Add(UNDEFINED);
set.Add(NULL_TYPE);
set.Add(UNDETECTABLE);
+ set.Add(GENERIC);
return set;
}
void Print(StringStream* stream) const;
- void TraceTransition(Types to) const;
+ void TraceTransition(State to) const;
};
+ static Handle<Type> StateToType(
+ Isolate* isolate, State state, Handle<Map> map = Handle<Map>());
+
// At most 6 different types can be distinguished, because the Code object
// only has room for a single byte to hold a set and there are two more
// boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
- CompareNilICStub(NilValue nil, Types types = Types())
- : types_(types) {
- nil_value_ = nil;
+ CompareNilICStub(NilValue nil, State state = State())
+ : nil_value_(nil), state_(state) {
}
CompareNilICStub(Code::ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
: HydrogenCodeStub(init_state) {
nil_value_ = NilValueField::decode(ic_state);
- types_ = Types(ExtractTypesFromExtraICState(ic_state));
+ state_ = State(ExtractTypesFromExtraICState(ic_state));
}
static Handle<Code> GetUninitialized(Isolate* isolate,
@@ -1183,9 +1182,9 @@ class CompareNilICStub : public HydrogenCodeStub {
}
virtual InlineCacheState GetICState() {
- if (types_ == Types::FullCompare()) {
+ if (state_ == State::Generic()) {
return MEGAMORPHIC;
- } else if (types_.Contains(MONOMORPHIC_MAP)) {
+ } else if (state_.Contains(MONOMORPHIC_MAP)) {
return MONOMORPHIC;
} else {
return PREMONOMORPHIC;
@@ -1198,20 +1197,21 @@ class CompareNilICStub : public HydrogenCodeStub {
// extra ic state = nil_value | type_n-1 | ... | type_0
virtual Code::ExtraICState GetExtraICState() {
- return NilValueField::encode(nil_value_) |
- types_.ToIntegral();
+ return NilValueField::encode(nil_value_) | state_.ToIntegral();
}
- static byte ExtractTypesFromExtraICState(
- Code::ExtraICState state) {
+ static byte ExtractTypesFromExtraICState(Code::ExtraICState state) {
return state & ((1 << NUMBER_OF_TYPES) - 1);
}
+ static NilValue ExtractNilValueFromExtraICState(Code::ExtraICState state) {
+ return NilValueField::decode(state);
+ }
void Record(Handle<Object> object);
- bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
+ bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
NilValue GetNilValue() const { return nil_value_; }
- Types GetTypes() const { return types_; }
- void ClearTypes() { types_.RemoveAll(); }
+ State GetState() const { return state_; }
+ void ClearState() { state_.RemoveAll(); }
virtual void PrintName(StringStream* stream);
@@ -1229,7 +1229,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual int NotMissMinorKey() { return GetExtraICState(); }
NilValue nil_value_;
- Types types_;
+ State state_;
DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
};
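
CompareNilICStub's Types set is renamed to State and gains a GENERIC bit. A minimal sketch of the EnumSet bit-set it builds on, assuming V8's utils.h interface (Add, Contains, RemoveAll, ToIntegral); the static_assert mirrors the STATIC_ASSERT above, showing why the five enumerators still fit in a single byte alongside the other flags.

#include <cassert>
#include <cstdint>

template <class E, class T = uint8_t>
class EnumSet {
 public:
  explicit EnumSet(T bits = 0) : bits_(bits) {}
  void Add(E element) { bits_ |= static_cast<T>(1) << element; }
  bool Contains(E element) const { return (bits_ >> element) & 1; }
  void RemoveAll() { bits_ = 0; }
  T ToIntegral() const { return bits_; }
 private:
  T bits_;
};

enum CompareNilType { UNDEFINED, NULL_TYPE, MONOMORPHIC_MAP, UNDETECTABLE,
                      GENERIC, NUMBER_OF_TYPES };

int main() {
  EnumSet<CompareNilType> state;
  state.Add(UNDEFINED);
  state.Add(NULL_TYPE);
  assert(state.Contains(NULL_TYPE) && !state.Contains(GENERIC));
  static_assert(NUMBER_OF_TYPES <= 6, "set plus two flags must fit a byte");
  return 0;
}
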
@@ -1733,27 +1733,51 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
};
+enum ContextCheckMode {
+ CONTEXT_CHECK_REQUIRED,
+ CONTEXT_CHECK_NOT_REQUIRED,
+ LAST_CONTEXT_CHECK_MODE = CONTEXT_CHECK_NOT_REQUIRED
+};
+
+
+enum AllocationSiteOverrideMode {
+ DONT_OVERRIDE,
+ DISABLE_ALLOCATION_SITES,
+ LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
+};
+
+
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
- ArrayConstructorStubBase(ElementsKind kind, bool disable_allocation_sites) {
+ ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
+ AllocationSiteOverrideMode override_mode) {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
- ASSERT(!disable_allocation_sites ||
+ ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
- DisableAllocationSitesBits::encode(disable_allocation_sites);
+ AllocationSiteOverrideModeBits::encode(override_mode) |
+ ContextCheckModeBits::encode(context_mode);
}
ElementsKind elements_kind() const {
return ElementsKindBits::decode(bit_field_);
}
- bool disable_allocation_sites() const {
- return DisableAllocationSitesBits::decode(bit_field_);
+ AllocationSiteOverrideMode override_mode() const {
+ return AllocationSiteOverrideModeBits::decode(bit_field_);
+ }
+
+ ContextCheckMode context_mode() const {
+ return ContextCheckModeBits::decode(bit_field_);
+ }
+
+ virtual bool IsPregenerated() {
+ // We only pre-generate stubs that verify correct context
+ return context_mode() == CONTEXT_CHECK_REQUIRED;
}
- virtual bool IsPregenerated() { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@@ -1764,8 +1788,14 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
private:
int NotMissMinorKey() { return bit_field_; }
+ // Ensure data fits within available bits.
+ STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
+ STATIC_ASSERT(LAST_CONTEXT_CHECK_MODE == 1);
+
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class DisableAllocationSitesBits: public BitField<bool, 8, 1> {};
+ class AllocationSiteOverrideModeBits: public
+ BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
+ class ContextCheckModeBits: public BitField<ContextCheckMode, 9, 1> {};
uint32_t bit_field_;
DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
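
ArrayConstructorStubBase now packs three fields into bit_field_ with BitField helpers, which is why the hunk asserts that both new enums fit in one bit each. A simplified but compilable rendering of that packing; BitField here is a stand-in for V8's utils.h template, and the ElementsKind values are illustrative.

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
class BitField {
 public:
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits >> shift) & ((1u << size) - 1));
  }
};

enum ElementsKind { FAST_SMI_ELEMENTS = 0, FAST_ELEMENTS = 2 };
enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };
enum ContextCheckMode { CONTEXT_CHECK_REQUIRED, CONTEXT_CHECK_NOT_REQUIRED };

class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
class AllocationSiteOverrideModeBits
    : public BitField<AllocationSiteOverrideMode, 8, 1> {};
class ContextCheckModeBits : public BitField<ContextCheckMode, 9, 1> {};

int main() {
  // Mirror of the bit_field_ initialization in the constructor above.
  uint32_t bit_field =
      ElementsKindBits::encode(FAST_ELEMENTS) |
      AllocationSiteOverrideModeBits::encode(DISABLE_ALLOCATION_SITES) |
      ContextCheckModeBits::encode(CONTEXT_CHECK_NOT_REQUIRED);
  assert(ElementsKindBits::decode(bit_field) == FAST_ELEMENTS);
  assert(AllocationSiteOverrideModeBits::decode(bit_field) ==
         DISABLE_ALLOCATION_SITES);
  assert(ContextCheckModeBits::decode(bit_field) ==
         CONTEXT_CHECK_NOT_REQUIRED);
  return 0;
}
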
@@ -1776,8 +1806,9 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNoArgumentConstructorStub(
ElementsKind kind,
- bool disable_allocation_sites = false)
- : ArrayConstructorStubBase(kind, disable_allocation_sites) {
+ ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+ AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+ : ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
virtual Handle<Code> GenerateCode();
@@ -1797,8 +1828,9 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArraySingleArgumentConstructorStub(
ElementsKind kind,
- bool disable_allocation_sites = false)
- : ArrayConstructorStubBase(kind, disable_allocation_sites) {
+ ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+ AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+ : ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
virtual Handle<Code> GenerateCode();
@@ -1818,8 +1850,9 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNArgumentsConstructorStub(
ElementsKind kind,
- bool disable_allocation_sites = false)
- : ArrayConstructorStubBase(kind, disable_allocation_sites) {
+ ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+ AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+ : ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
virtual Handle<Code> GenerateCode();
@@ -1971,7 +2004,7 @@ class ToBooleanStub: public HydrogenCodeStub {
class Types : public EnumSet<Type, byte> {
public:
- Types() {}
+ Types() : EnumSet<Type, byte>(0) {}
explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}
byte ToByte() const { return ToIntegral(); }
@@ -1980,10 +2013,10 @@ class ToBooleanStub: public HydrogenCodeStub {
bool Record(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;
- };
+ bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
- static Types no_types() { return Types(); }
- static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+ static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+ };
explicit ToBooleanStub(Types types = Types())
: types_(types) { }
@@ -2135,13 +2168,6 @@ class ProfileEntryHookStub : public PlatformCodeStub {
// Generates a call to the entry hook if it's enabled.
static void MaybeCallEntryHook(MacroAssembler* masm);
- // Sets or unsets the entry hook function. Returns true on success,
- // false on an attempt to replace a non-NULL entry hook with another
- // non-NULL hook.
- static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
-
- static bool HasEntryHook() { return entry_hook_ != NULL; }
-
private:
static void EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer);
@@ -2151,9 +2177,6 @@ class ProfileEntryHookStub : public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- // The current function entry hook.
- static FunctionEntryHook entry_hook_;
-
DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
};
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index af2f1f667b..8029d2f882 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -30,6 +30,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "prettyprinter.h"
#include "rewriter.h"
@@ -178,7 +179,7 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
!isolate->cpu_profiler()->is_profiling()) {
return false;
}
- Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
+ Handle<String> name = Handle<String>::cast(type->AsLiteral()->value());
if (FLAG_log_regexp) {
if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
return true;
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 950c7e7374..c5604ab30f 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -295,6 +295,16 @@ function WeakMapDelete(key) {
}
+function WeakMapClear() {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakMapInitialize(this);
+}
+
+
// -------------------------------------------------------------------
function SetUpWeakMap() {
@@ -309,7 +319,8 @@ function SetUpWeakMap() {
"get", WeakMapGet,
"set", WeakMapSet,
"has", WeakMapHas,
- "delete", WeakMapDelete
+ "delete", WeakMapDelete,
+ "clear", WeakMapClear
));
}
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 5fc107f943..8edb41d724 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -32,6 +32,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -53,7 +54,8 @@ namespace v8 {
namespace internal {
-CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
+CompilationInfo::CompilationInfo(Handle<Script> script,
+ Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()) {
@@ -71,7 +73,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
}
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
+ Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
@@ -83,7 +86,8 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
- Isolate* isolate, Zone* zone)
+ Isolate* isolate,
+ Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()) {
@@ -92,7 +96,9 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
}
-void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+void CompilationInfo::Initialize(Isolate* isolate,
+ Mode mode,
+ Zone* zone) {
isolate_ = isolate;
function_ = NULL;
scope_ = NULL;
@@ -106,6 +112,9 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ dependencies_[i] = NULL;
+ }
if (mode == STUB) {
mode_ = STUB;
return;
@@ -125,6 +134,47 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
delete no_frame_ranges_;
+#ifdef DEBUG
+  // Check that no dependent maps have been added, or that any added
+  // dependent maps have been rolled back or committed.
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ASSERT_EQ(NULL, dependencies_[i]);
+ }
+#endif // DEBUG
+}
+
+
+void CompilationInfo::CommitDependencies(Handle<Code> code) {
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+ if (group_objects == NULL) continue;
+ ASSERT(!object_wrapper_.is_null());
+ for (int j = 0; j < group_objects->length(); j++) {
+ DependentCode::DependencyGroup group =
+ static_cast<DependentCode::DependencyGroup>(i);
+ DependentCode* dependent_code =
+ DependentCode::ForObject(group_objects->at(j), group);
+ dependent_code->UpdateToFinishedCode(group, this, *code);
+ }
+ dependencies_[i] = NULL; // Zone-allocated, no need to delete.
+ }
+}
+
+
+void CompilationInfo::RollbackDependencies() {
+ // Unregister from all dependent maps if not yet committed.
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+ if (group_objects == NULL) continue;
+ for (int j = 0; j < group_objects->length(); j++) {
+ DependentCode::DependencyGroup group =
+ static_cast<DependentCode::DependencyGroup>(i);
+ DependentCode* dependent_code =
+ DependentCode::ForObject(group_objects->at(j), group);
+ dependent_code->RemoveCompilationInfo(group, this);
+ }
+ dependencies_[i] = NULL; // Zone-allocated, no need to delete.
+ }
}
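
CommitDependencies and RollbackDependencies above are the two exits of the
new dependency tracking: dependencies accumulate per group during
compilation and are then either attached to the finished code or
unregistered. A minimal sketch of that commit-or-rollback shape, with
invented group names and plain integers standing in for heap objects:

    #include <vector>

    enum DependencyGroup { kGroupA, kGroupB, kGroupCount };

    struct Compilation {
      std::vector<int> deps[kGroupCount];  // ids of objects depended upon

      void Commit() {
        for (int g = 0; g < kGroupCount; ++g) {
          // ...point each recorded object at the finished code here...
          deps[g].clear();  // mirrors dependencies_[i] = NULL above
        }
      }
      void Rollback() {
        for (int g = 0; g < kGroupCount; ++g) {
          // ...unregister this compilation from each recorded object...
          deps[g].clear();
        }
      }
    };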
@@ -329,7 +379,10 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// performance of the hydrogen-based compiler.
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
- HPhase phase(HPhase::kFullCodeGen, isolate());
+ int64_t start_ticks = 0;
+ if (FLAG_hydrogen_stats) {
+ start_ticks = OS::Ticks();
+ }
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
@@ -346,6 +399,10 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
Compiler::RecordFunctionCompilation(
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
+ if (FLAG_hydrogen_stats) {
+ int64_t ticks = OS::Ticks() - start_ticks;
+ isolate()->GetHStatistics()->IncrementFullCodeGen(ticks);
+ }
}
// Check that the unoptimized, shared code is ready for
@@ -364,7 +421,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
// Type-check the function.
- AstTyper::Type(info());
+ AstTyper::Run(info());
graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
@@ -490,7 +547,6 @@ static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
ASSERT(!isolate->native_context().is_null());
@@ -773,7 +829,6 @@ static bool InstallFullCode(CompilationInfo* info) {
// Check the function has compiled code.
ASSERT(shared->is_compiled());
- shared->set_code_age(0);
shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
@@ -855,8 +910,6 @@ static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
bool Compiler::CompileLazy(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
-
// The VM is in the COMPILER state until exiting this function.
VMState<COMPILER> state(isolate);
@@ -982,7 +1035,7 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
- info->SetCode(Handle<Code>(info->shared_info()->code()));
+ info->AbortOptimization();
InstallFullCode(*info);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** aborting optimization for ");
@@ -1000,9 +1053,14 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
- if (status != OptimizingCompiler::SUCCEEDED) {
- optimizing_compiler->info()->set_bailout_reason(
- "failed/bailed out last time");
+ if (info->HasAbortedDueToDependencyChange()) {
+ info->set_bailout_reason("bailed out due to dependent map");
+ status = optimizing_compiler->AbortOptimization();
+ } else if (status != OptimizingCompiler::SUCCEEDED) {
+ info->set_bailout_reason("failed/bailed out last time");
+ status = optimizing_compiler->AbortOptimization();
+ } else if (isolate->DebuggerHasBreakPoints()) {
+ info->set_bailout_reason("debugger is active");
status = optimizing_compiler->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
@@ -1167,4 +1225,31 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
info));
}
+
+CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
+ : name_(name), info_(info), zone_(info->isolate()) {
+ if (FLAG_hydrogen_stats) {
+ info_zone_start_allocation_size_ = info->zone()->allocation_size();
+ start_ticks_ = OS::Ticks();
+ }
+}
+
+
+CompilationPhase::~CompilationPhase() {
+ if (FLAG_hydrogen_stats) {
+ unsigned size = zone()->allocation_size();
+ size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
+ int64_t ticks = OS::Ticks() - start_ticks_;
+ isolate()->GetHStatistics()->SaveTiming(name_, ticks, size);
+ }
+}
+
+
+bool CompilationPhase::ShouldProduceTraceOutput() const {
+  // Produce trace output if FLAG_trace_hydrogen is set and the first letter
+  // of the phase name appears in the command line parameter FLAG_trace_phase.
+ return (FLAG_trace_hydrogen &&
+ OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+}
+
} } // namespace v8::internal
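
CompilationPhase, added above, is an RAII helper: the constructor samples
the start ticks (and the zone allocation size), and the destructor records
the elapsed time, so every exit path out of a phase is accounted for. The
same pattern in a self-contained sketch, with std::chrono standing in for
OS::Ticks() and printf standing in for HStatistics:

    #include <chrono>
    #include <cstdio>

    class ScopedPhaseTimer {
     public:
      explicit ScopedPhaseTimer(const char* name)
          : name_(name), start_(std::chrono::steady_clock::now()) {}
      ~ScopedPhaseTimer() {
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld us\n", name_, us);  // reported on every exit path
      }
     private:
      const char* name_;
      std::chrono::steady_clock::time_point start_;
    };

    // Usage: { ScopedPhaseTimer t("codegen"); /* ...phase body... */ }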
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 8e6d295996..161f40458c 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -57,12 +57,8 @@ struct OffsetRange {
// is constructed based on the resources available at compile-time.
class CompilationInfo {
public:
- CompilationInfo(Handle<Script> script, Zone* zone);
- CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
- CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
-
- ~CompilationInfo();
+ virtual ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
@@ -243,6 +239,18 @@ class CompilationInfo {
deferred_handles_ = deferred_handles;
}
+ ZoneList<Handle<HeapObject> >* dependencies(
+ DependentCode::DependencyGroup group) {
+ if (dependencies_[group] == NULL) {
+ dependencies_[group] = new(zone_) ZoneList<Handle<HeapObject> >(2, zone_);
+ }
+ return dependencies_[group];
+ }
+
+ void CommitDependencies(Handle<Code> code);
+
+ void RollbackDependencies();
+
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
@@ -276,6 +284,30 @@ class CompilationInfo {
return result;
}
+ Handle<Foreign> object_wrapper() {
+ if (object_wrapper_.is_null()) {
+ object_wrapper_ =
+ isolate()->factory()->NewForeign(reinterpret_cast<Address>(this));
+ }
+ return object_wrapper_;
+ }
+
+ void AbortDueToDependencyChange() {
+ mode_ = DEPENDENCY_CHANGE_ABORT;
+ }
+
+ bool HasAbortedDueToDependencyChange() {
+ return mode_ == DEPENDENCY_CHANGE_ABORT;
+ }
+
+ protected:
+ CompilationInfo(Handle<Script> script,
+ Zone* zone);
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+ Zone* zone);
+ CompilationInfo(HydrogenCodeStub* stub,
+ Isolate* isolate,
+ Zone* zone);
private:
Isolate* isolate_;
@@ -289,7 +321,8 @@ class CompilationInfo {
BASE,
OPTIMIZE,
NONOPT,
- STUB
+ STUB,
+ DEPENDENCY_CHANGE_ABORT
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
@@ -369,6 +402,8 @@ class CompilationInfo {
DeferredHandles* deferred_handles_;
+ ZoneList<Handle<HeapObject> >* dependencies_[DependentCode::kGroupCount];
+
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
@@ -387,6 +422,8 @@ class CompilationInfo {
// during graph optimization.
int opt_count_;
+ Handle<Foreign> object_wrapper_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -397,24 +434,26 @@ class CompilationInfoWithZone: public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
- zone_(script->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ zone_(script->GetIsolate()) {}
explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
: CompilationInfo(shared_info, &zone_),
- zone_(shared_info->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ zone_(shared_info->GetIsolate()) {}
explicit CompilationInfoWithZone(Handle<JSFunction> closure)
: CompilationInfo(closure, &zone_),
- zone_(closure->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
- explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ zone_(closure->GetIsolate()) {}
+ CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_),
- zone_(isolate),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ zone_(isolate) {}
+
+  // Virtual destructor because a CompilationInfoWithZone has to exit the
+  // zone scope and get rid of dependent maps even when the destructor is
+  // invoked through a base-class CompilationInfo pointer.
+ virtual ~CompilationInfoWithZone() {
+ RollbackDependencies();
+ }
private:
Zone zone_;
- ZoneScope zone_scope_;
};
@@ -578,6 +617,30 @@ class Compiler : public AllStatic {
};
+class CompilationPhase BASE_EMBEDDED {
+ public:
+ CompilationPhase(const char* name, CompilationInfo* info);
+ ~CompilationPhase();
+
+ protected:
+ bool ShouldProduceTraceOutput() const;
+
+ const char* name() const { return name_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
+ Zone* zone() { return &zone_; }
+
+ private:
+ const char* name_;
+ CompilationInfo* info_;
+ Zone zone_;
+ unsigned info_zone_start_allocation_size_;
+ int64_t start_ticks_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
+};
+
+
} } // namespace v8::internal
#endif // V8_COMPILER_H_
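
The switch to a virtual ~CompilationInfo above matters because a
CompilationInfoWithZone can be destroyed through a CompilationInfo pointer.
A toy illustration of why the base destructor must be virtual:

    #include <cstdio>

    struct Info {
      virtual ~Info() { std::printf("~Info\n"); }
    };

    struct InfoWithZone : Info {
      virtual ~InfoWithZone() { std::printf("~InfoWithZone (zone freed)\n"); }
    };

    int main() {
      Info* info = new InfoWithZone();
      delete info;  // runs ~InfoWithZone, then ~Info; without 'virtual' on
                    // ~Info the derived destructor would be skipped
      return 0;
    }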
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index f04ccd1f3e..fdf6d27ef5 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -134,6 +134,7 @@ enum BindingFlags {
V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
+ V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@@ -299,6 +300,7 @@ class Context: public FixedArray {
FLOAT_ARRAY_FUN_INDEX,
DOUBLE_ARRAY_FUN_INDEX,
UINT8C_ARRAY_FUN_INDEX,
+ DATA_VIEW_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 4982197cab..c3cc27c770 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -56,6 +56,17 @@ void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
+void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ CodeEntry* entry = code_map->FindEntry(start);
+ if (!entry) {
+    // Code objects for builtins should already have been added to the map,
+    // but some of them may have been filtered out by CpuProfiler.
+ return;
+ }
+ entry->SetBuiltinId(builtin_id);
+}
+
+
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
@@ -64,16 +75,6 @@ TickSample* ProfilerEventsProcessor::TickSampleEvent() {
}
-bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
- Logger::LogEventsAndTags tag) {
- return FLAG_prof_browser_mode
- && (tag != Logger::CALLBACK_TAG
- && tag != Logger::FUNCTION_TAG
- && tag != Logger::LAZY_COMPILE_TAG
- && tag != Logger::REG_EXP_TAG
- && tag != Logger::SCRIPT_TAG);
-}
-
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_INL_H_
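
ReportBuiltinEventRecord::UpdateCodeMap above tags an already-registered
code entry with its builtin id and tolerates entries that were filtered out
earlier. The lookup-then-tag pattern in a self-contained sketch, with
std::map standing in for V8's CodeMap:

    #include <map>

    struct CodeEntry { int builtin_id = -1; };

    void ReportBuiltin(std::map<void*, CodeEntry>* code_map,
                       void* start, int builtin_id) {
      std::map<void*, CodeEntry>::iterator it = code_map->find(start);
      if (it == code_map->end()) return;  // filtered out earlier; skip
      it->second.builtin_id = builtin_id;
    }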
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 42722191bd..b3800f5877 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -45,11 +45,9 @@ static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(
- ProfileGenerator* generator, CpuProfilesCollection* profiles)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
- profiles_(profiles),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
@@ -58,127 +56,15 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(
}
-void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix,
- Name* name,
- Address start) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = profiles_->NewCodeEntry(tag, prefix, name);
- rec->size = 1;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Name* name,
- String* resource_name,
- int line_number,
- Address start,
- unsigned size,
- Address shared,
- CompilationInfo* info) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number);
- if (info) {
- rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
- }
- rec->size = size;
- rec->shared = shared;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = profiles_->NewCodeEntry(tag, name);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = profiles_->NewCodeEntry(tag, args_count);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
- CodeEventsContainer evt_rec;
- CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->type = CodeEventRecord::CODE_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
+void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
+ event.generic.order = ++enqueue_order_;
+ events_buffer_.Enqueue(event);
}
-void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
- Address to) {
- CodeEventsContainer evt_rec;
- SharedFunctionInfoMoveEventRecord* rec =
- &evt_rec.SharedFunctionInfoMoveEventRecord_;
- rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::RegExpCodeCreateEvent(
- Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = profiles_->NewCodeEntry(tag, prefix, name);
- rec->size = size;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::AddCurrentStack() {
+void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(enqueue_order_);
TickSample* sample = &record.sample;
- Isolate* isolate = Isolate::Current();
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
for (StackTraceFrameIterator it(isolate);
@@ -191,9 +77,8 @@ void ProfilerEventsProcessor::AddCurrentStack() {
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
- if (!events_buffer_.IsEmpty()) {
- CodeEventsContainer record;
- events_buffer_.Dequeue(&record);
+ CodeEventsContainer record;
+ if (events_buffer_.Dequeue(&record)) {
switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss) \
case CodeEventRecord::type: \
@@ -306,30 +191,56 @@ bool CpuProfiler::HasDetachedProfiles() {
}
+static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag) {
+ return FLAG_prof_browser_mode
+ && (tag != Logger::CALLBACK_TAG
+ && tag != Logger::FUNCTION_TAG
+ && tag != Logger::LAZY_COMPILE_TAG
+ && tag != Logger::REG_EXP_TAG
+ && tag != Logger::SCRIPT_TAG);
+}
+
+
void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
- processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
+ if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = profiles_->NewCodeEntry(
+ Logger::CALLBACK_TAG,
+ profiles_->GetName(name),
+ TokenEnumerator::kInheritsSecurityToken);
+ rec->size = 1;
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment) {
- processor_->CodeCreateEvent(
- tag, comment, code->address(), code->ExecutableSize());
+ Code* code,
+ const char* name) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+ rec->size = code->ExecutableSize();
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, Name* name) {
- processor_->CodeCreateEvent(
- tag,
- name,
- isolate_->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- NULL,
- NULL);
+ Code* code,
+ Name* name) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+ rec->size = code->ExecutableSize();
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
@@ -338,15 +249,22 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* name) {
- processor_->CodeCreateEvent(
- tag,
- name,
- isolate_->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- shared->address(),
- info);
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+ if (info) {
+ rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ }
+ if (shared->script()->IsScript()) {
+ ASSERT(Script::cast(shared->script()));
+ Script* script = Script::cast(shared->script());
+ rec->entry->set_script_id(script->id()->value());
+ }
+ rec->size = code->ExecutableSize();
+ rec->shared = shared->address();
+ processor_->Enqueue(evt_rec);
}
@@ -355,30 +273,53 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
SharedFunctionInfo* shared,
CompilationInfo* info,
String* source, int line) {
- processor_->CodeCreateEvent(
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(
tag,
- shared->DebugName(),
- source,
- line,
- code->address(),
- code->ExecutableSize(),
- shared->address(),
- info);
+ profiles_->GetFunctionName(shared->DebugName()),
+ TokenEnumerator::kNoSecurityToken,
+ CodeEntry::kEmptyNamePrefix,
+ profiles_->GetName(source),
+ line);
+ if (info) {
+ rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ }
+ ASSERT(Script::cast(shared->script()));
+ Script* script = Script::cast(shared->script());
+ rec->entry->set_script_id(script->id()->value());
+ rec->size = code->ExecutableSize();
+ rec->shared = shared->address();
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count) {
- processor_->CodeCreateEvent(
+ Code* code,
+ int args_count) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(
tag,
- args_count,
- code->address(),
- code->ExecutableSize());
+ profiles_->GetName(args_count),
+ TokenEnumerator::kInheritsSecurityToken,
+ "args_count: ");
+ rec->size = code->ExecutableSize();
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
- processor_->CodeMoveEvent(from, to);
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
+ CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+ rec->from = from;
+ rec->to = to;
+ processor_->Enqueue(evt_rec);
}
@@ -387,29 +328,59 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
- processor_->SharedFunctionInfoMoveEvent(from, to);
+ CodeEventsContainer evt_rec(CodeEventRecord::SHARED_FUNC_MOVE);
+ SharedFunctionInfoMoveEventRecord* rec =
+ &evt_rec.SharedFunctionInfoMoveEventRecord_;
+ rec->from = from;
+ rec->to = to;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
- processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "get ", name, entry_point);
+ if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = profiles_->NewCodeEntry(
+ Logger::CALLBACK_TAG,
+ profiles_->GetName(name),
+ TokenEnumerator::kInheritsSecurityToken,
+ "get ");
+ rec->size = 1;
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- processor_->RegExpCodeCreateEvent(
+ if (FilterOutCodeCreateEvent(Logger::REG_EXP_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(
Logger::REG_EXP_TAG,
- "RegExp: ",
- source,
- code->address(),
- code->ExecutableSize());
+ profiles_->GetName(source),
+ TokenEnumerator::kInheritsSecurityToken,
+ "RegExp: ");
+ rec->size = code->ExecutableSize();
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
- processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "set ", name, entry_point);
+ if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = profiles_->NewCodeEntry(
+ Logger::CALLBACK_TAG,
+ profiles_->GetName(name),
+ TokenEnumerator::kInheritsSecurityToken,
+ "set ");
+ rec->size = 1;
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
@@ -425,7 +396,23 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
}
+CpuProfiler::CpuProfiler(Isolate* isolate,
+ CpuProfilesCollection* test_profiles,
+ ProfileGenerator* test_generator,
+ ProfilerEventsProcessor* test_processor)
+ : isolate_(isolate),
+ profiles_(test_profiles),
+ next_profile_uid_(1),
+ token_enumerator_(new TokenEnumerator()),
+ generator_(test_generator),
+ processor_(test_processor),
+ need_to_stop_sampler_(false),
+ is_profiling_(false) {
+}
+
+
CpuProfiler::~CpuProfiler() {
+ ASSERT(!is_profiling_);
delete token_enumerator_;
delete profiles_;
}
@@ -440,7 +427,7 @@ void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
StartProcessorIfNotStarted();
}
- processor_->AddCurrentStack();
+ processor_->AddCurrentStack(isolate_);
}
@@ -451,23 +438,24 @@ void CpuProfiler::StartProfiling(String* title, bool record_samples) {
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
+ Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
- saved_logging_nesting_ = isolate_->logger()->logging_nesting_;
- isolate_->logger()->logging_nesting_ = 0;
+ saved_logging_nesting_ = logger->logging_nesting_;
+ logger->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_, profiles_);
+ processor_ = new ProfilerEventsProcessor(generator_);
is_profiling_ = true;
processor_->StartSynchronously();
// Enumerate stuff we already have in the heap.
- if (isolate_->heap()->HasBeenSetUp()) {
- if (!FLAG_prof_browser_mode) {
- isolate_->logger()->LogCodeObjects();
- }
- isolate_->logger()->LogCompiledFunctions();
- isolate_->logger()->LogAccessorCallbacks();
+ ASSERT(isolate_->heap()->HasBeenSetUp());
+ if (!FLAG_prof_browser_mode) {
+ logger->LogCodeObjects();
}
+ logger->LogCompiledFunctions();
+ logger->LogAccessorCallbacks();
+ LogBuiltins();
// Enable stack sampling.
- Sampler* sampler = isolate_->logger()->sampler();
+ Sampler* sampler = logger->sampler();
sampler->IncreaseProfilingDepth();
if (!sampler->IsActive()) {
sampler->Start();
@@ -526,4 +514,18 @@ void CpuProfiler::StopProcessor() {
}
+void CpuProfiler::LogBuiltins() {
+ Builtins* builtins = isolate_->builtins();
+ ASSERT(builtins->is_initialized());
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
+ ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
+ Builtins::Name id = static_cast<Builtins::Name>(i);
+ rec->start = builtins->builtin(id)->address();
+ rec->builtin_id = id;
+ processor_->Enqueue(evt_rec);
+ }
+}
+
+
} } // namespace v8::internal
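
The theme of this cpu-profiler.cc change: the per-event-kind enqueue methods
on ProfilerEventsProcessor are gone, producers now build each record
themselves, and a single Enqueue() stamps the ordering. That single entry
point in miniature, with std::queue standing in for the unbound queue:

    #include <queue>

    struct EventRecord {
      unsigned order;
      // ...payload elided...
    };

    class EventsProcessor {
     public:
      EventsProcessor() : enqueue_order_(0) {}
      void Enqueue(EventRecord event) {
        event.order = ++enqueue_order_;  // consumer replays events in order
        queue_.push(event);
      }
     private:
      unsigned enqueue_order_;
      std::queue<EventRecord> queue_;
    };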
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 2f8479fcca..77fdb0681b 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -49,7 +49,8 @@ class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
+ V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
+ V(REPORT_BUILTIN, ReportBuiltinEventRecord)
class CodeEventRecord {
@@ -63,7 +64,7 @@ class CodeEventRecord {
#undef DECLARE_TYPE
Type type;
- unsigned order;
+ mutable unsigned order;
};
@@ -96,6 +97,15 @@ class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
};
+class ReportBuiltinEventRecord : public CodeEventRecord {
+ public:
+ Address start;
+ Builtins::Name builtin_id;
+
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
class TickSampleEventRecord {
public:
// The parameterless constructor is used when we dequeue data from
@@ -122,43 +132,36 @@ class TickSampleEventRecord {
};
+class CodeEventsContainer {
+ public:
+ explicit CodeEventsContainer(
+ CodeEventRecord::Type type = CodeEventRecord::NONE) {
+ generic.type = type;
+ }
+ union {
+ CodeEventRecord generic;
+#define DECLARE_CLASS(ignore, type) type type##_;
+ CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
+#undef DECLARE_CLASS
+ };
+};
+
+
// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- ProfilerEventsProcessor(ProfileGenerator* generator,
- CpuProfilesCollection* profiles);
+ explicit ProfilerEventsProcessor(ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
virtual void Run();
inline void Stop() { running_ = false; }
INLINE(bool running()) { return running_; }
+ void Enqueue(const CodeEventsContainer& event);
- // Events adding methods. Called by VM threads.
- void CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, Name* name,
- Address start);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Name* name,
- String* resource_name, int line_number,
- Address start, unsigned size,
- Address shared,
- CompilationInfo* info);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start, unsigned size);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start, unsigned size);
- void CodeMoveEvent(Address from, Address to);
- void CodeDeleteEvent(Address from);
- void SharedFunctionInfoMoveEvent(Address from, Address to);
- void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start, unsigned size);
// Puts current stack into tick sample events buffer.
- void AddCurrentStack();
+ void AddCurrentStack(Isolate* isolate);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -167,21 +170,11 @@ class ProfilerEventsProcessor : public Thread {
INLINE(TickSample* TickSampleEvent());
private:
- union CodeEventsContainer {
- CodeEventRecord generic;
-#define DECLARE_CLASS(ignore, type) type type##_;
- CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
-#undef DECLARE_TYPE
- };
-
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
- INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
-
ProfileGenerator* generator_;
- CpuProfilesCollection* profiles_;
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
@@ -204,6 +197,12 @@ class ProfilerEventsProcessor : public Thread {
class CpuProfiler {
public:
explicit CpuProfiler(Isolate* isolate);
+
+ CpuProfiler(Isolate* isolate,
+ CpuProfilesCollection* test_collection,
+ ProfileGenerator* test_generator,
+ ProfilerEventsProcessor* test_processor);
+
~CpuProfiler();
void StartProfiling(const char* title, bool record_samples = false);
@@ -248,12 +247,16 @@ class CpuProfiler {
void SharedFunctionInfoMoveEvent(Address from, Address to);
INLINE(bool is_profiling() const) { return is_profiling_; }
+ bool* is_profiling_address() {
+ return &is_profiling_;
+ }
private:
void StartProcessorIfNotStarted();
void StopProcessorIfLastProfile(const char* title);
void StopProcessor();
void ResetProfiles();
+ void LogBuiltins();
Isolate* isolate_;
CpuProfilesCollection* profiles_;
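
CodeEventsContainer, now declared in the header above, is a tagged union:
every record type starts with the same type/order prefix, so the 'generic'
member can read those common fields no matter which concrete record was
stored. Simplified sketch with invented field types:

    struct GenericRecord    { int type; unsigned order; };
    struct CodeCreateRecord { int type; unsigned order; void* start; unsigned size; };
    struct CodeMoveRecord   { int type; unsigned order; void* from; void* to; };

    struct EventsContainer {
      explicit EventsContainer(int type = 0) { generic.type = type; }
      union {
        // Reading generic.type after writing another member is valid here
        // because all members share a common initial sequence.
        GenericRecord generic;
        CodeCreateRecord code_create_;
        CodeMoveRecord code_move_;
      };
    };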
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index e1d29d98ef..aac7aab156 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -68,7 +68,7 @@ void HandleDebugEvent(DebugEvent event,
// Get the toJSONProtocol function on the event and get the JSON format.
Local<String> to_json_fun_name = String::New("toJSONProtocol");
Local<Function> to_json_fun =
- Function::Cast(*event_data->Get(to_json_fun_name));
+ Local<Function>::Cast(event_data->Get(to_json_fun_name));
Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
@@ -91,9 +91,9 @@ void HandleDebugEvent(DebugEvent event,
// Get the debug command processor.
Local<String> fun_name = String::New("debugCommandProcessor");
- Local<Function> fun = Function::Cast(*exec_state->Get(fun_name));
+ Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
Local<Object> cmd_processor =
- Object::Cast(*fun->Call(exec_state, 0, NULL));
+ Local<Object>::Cast(fun->Call(exec_state, 0, NULL));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
return;
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
index 5226364c64..298518d72a 100644
--- a/deps/v8/src/d8-readline.cc
+++ b/deps/v8/src/d8-readline.cc
@@ -150,18 +150,19 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
Locker lock(isolate);
+ HandleScope scope;
+ Handle<Array> completions;
if (state == 0) {
- HandleScope scope;
Local<String> full_text = String::New(rl_line_buffer, rl_point);
- Handle<Array> completions =
- Shell::GetCompletions(isolate, String::New(text), full_text);
- current_completions = Persistent<Array>::New(isolate, completions);
+ completions = Shell::GetCompletions(isolate, String::New(text), full_text);
+ current_completions.Reset(isolate, completions);
current_index = 0;
+ } else {
+ completions = Local<Array>::New(isolate, current_completions);
}
- if (current_index < current_completions->Length()) {
- HandleScope scope;
+ if (current_index < completions->Length()) {
Handle<Integer> index = Integer::New(current_index);
- Handle<Value> str_obj = current_completions->Get(index);
+ Handle<Value> str_obj = completions->Get(index);
current_index++;
String::Utf8Value str(str_obj);
return strdup(*str);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index a917dbdbe3..65af987b42 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -1076,14 +1076,15 @@ static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
}
static void ReadBufferWeakCallback(v8::Isolate* isolate,
- Persistent<Value>* object,
+ Persistent<ArrayBuffer>* array_buffer,
uint8_t* data) {
- size_t byte_length = ArrayBuffer::Cast(**object)->ByteLength();
+ size_t byte_length =
+ Local<ArrayBuffer>::New(isolate, *array_buffer)->ByteLength();
isolate->AdjustAmountOfExternalAllocatedMemory(
-static_cast<intptr_t>(byte_length));
delete[] data;
- object->Dispose(isolate);
+ array_buffer->Dispose();
}
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -1103,8 +1104,8 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
- v8::Persistent<v8::Value> weak_handle(isolate, buffer);
- weak_handle.MakeWeak(isolate, data, ReadBufferWeakCallback);
+ v8::Persistent<v8::ArrayBuffer> weak_handle(isolate, buffer);
+ weak_handle.MakeWeak(data, ReadBufferWeakCallback);
weak_handle.MarkIndependent();
isolate->AdjustAmountOfExternalAllocatedMemory(length);
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index ea043dcf4f..47a7cc0118 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -26,12 +26,13 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
- 'includes': ['../build/common.gypi'],
'variables': {
+ 'v8_code': 1,
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
},
+ 'includes': ['../build/common.gypi'],
'targets': [
{
'target_name': 'd8',
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index df1046133f..3efea06378 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -1020,7 +1020,7 @@ DebugRequest.prototype.changeBreakpointCommandToJSONRequest_ =
args.substring(nextPos + 1, args.length) : 'all';
if (!arg2) {
arg2 = 'all'; // if unspecified, set for all.
- } if (arg2 == 'unc') { // check for short cut.
+ } else if (arg2 == 'unc') { // check for short cut.
arg2 = 'uncaught';
}
excType = arg2;
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 7eeb794fa0..8ceccf67c5 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -215,6 +215,8 @@ class GrowableBitVector BASE_EMBEDDED {
};
GrowableBitVector() : bits_(NULL) { }
+ GrowableBitVector(int length, Zone* zone)
+ : bits_(new(zone) BitVector(length, zone)) { }
bool Contains(int value) const {
if (!InBitsRange(value)) return false;
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index 7787312ddc..88efbe212a 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -71,6 +71,13 @@ Debug.ScriptBreakPointType = { ScriptId: 0,
ScriptName: 1,
ScriptRegExp: 2 };
+// The different types of breakpoint position alignments.
+// Must match BreakPositionAlignment in debug.h.
+Debug.BreakPositionAlignment = {
+ Statement: 0,
+ BreakPosition: 1
+};
+
function ScriptTypeFlag(type) {
return (1 << type);
}
@@ -251,7 +258,7 @@ function IsBreakPointTriggered(break_id, break_point) {
// script name or script id and the break point is represented as line and
// column.
function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId) {
+ opt_groupId, opt_position_alignment) {
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
@@ -265,6 +272,8 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
this.line_ = opt_line || 0;
this.column_ = opt_column;
this.groupId_ = opt_groupId;
+ this.position_alignment_ = IS_UNDEFINED(opt_position_alignment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
this.hit_count_ = 0;
this.active_ = true;
this.condition_ = null;
@@ -276,7 +285,8 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
// Creates a clone of a script break point that is linked to another script.
ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- other_script.id, this.line_, this.column_, this.groupId_);
+ other_script.id, this.line_, this.column_, this.groupId_,
+ this.position_alignment_);
copy.number_ = next_break_point_number++;
script_break_points.push(copy);
@@ -443,7 +453,9 @@ ScriptBreakPoint.prototype.set = function (script) {
// Create a break point object and set the break point.
break_point = MakeBreakPoint(position, this);
break_point.setIgnoreCount(this.ignoreCount());
- var actual_position = %SetScriptBreakPoint(script, position, break_point);
+ var actual_position = %SetScriptBreakPoint(script, position,
+ this.position_alignment_,
+ break_point);
if (IS_UNDEFINED(actual_position)) {
actual_position = position;
}
@@ -509,9 +521,11 @@ Debug.breakExecution = function(f) {
%Break();
};
-Debug.breakLocations = function(f) {
+Debug.breakLocations = function(f, opt_position_alignment) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %GetBreakLocations(f);
+  var position_alignment = IS_UNDEFINED(opt_position_alignment)
+      ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
+  return %GetBreakLocations(f, position_alignment);
};
// Returns a Script object. If the parameter is a function the return value
@@ -674,7 +688,8 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled)
+ condition, enabled,
+ opt_position_alignment)
{
break_point = MakeBreakPoint(position);
break_point.setCondition(condition);
@@ -682,10 +697,12 @@ Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
break_point.disable();
}
var scripts = this.scripts();
+ var position_alignment = IS_UNDEFINED(opt_position_alignment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
for (var i = 0; i < scripts.length; i++) {
if (script_id == scripts[i].id) {
break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
- break_point);
+ position_alignment, break_point);
break;
}
}
@@ -780,11 +797,11 @@ Debug.findScriptBreakPoint = function(break_point_number, remove) {
// specified source line and column within that line.
Debug.setScriptBreakPoint = function(type, script_id_or_name,
opt_line, opt_column, opt_condition,
- opt_groupId) {
+ opt_groupId, opt_position_alignment) {
// Create script break point object.
var script_break_point =
new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId);
+ opt_groupId, opt_position_alignment);
// Assign number to the new script break point and add it.
script_break_point.number_ = next_break_point_number++;
@@ -806,10 +823,12 @@ Debug.setScriptBreakPoint = function(type, script_id_or_name,
Debug.setScriptBreakPointById = function(script_id,
opt_line, opt_column,
- opt_condition, opt_groupId) {
+ opt_condition, opt_groupId,
+ opt_position_alignment) {
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
script_id, opt_line, opt_column,
- opt_condition, opt_groupId);
+ opt_condition, opt_groupId,
+ opt_position_alignment);
};
@@ -893,11 +912,11 @@ Debug.isBreakOnUncaughtException = function() {
return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
};
-Debug.showBreakPoints = function(f, full) {
+Debug.showBreakPoints = function(f, full, opt_position_alignment) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
var source = full ? this.scriptSource(f) : this.source(f);
var offset = full ? this.sourcePosition(f) : 0;
- var locations = this.breakLocations(f);
+ var locations = this.breakLocations(f, opt_position_alignment);
if (!locations) return source;
locations.sort(function(x, y) { return x - y; });
var result = "";
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 5d26ba2b13..07c1a0cce8 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -235,17 +235,30 @@ void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Find the break point closest to the supplied source position.
-void BreakLocationIterator::FindBreakLocationFromPosition(int position) {
+void BreakLocationIterator::FindBreakLocationFromPosition(int position,
+ BreakPositionAlignment alignment) {
// Run through all break points to locate the one closest to the source
// position.
int closest_break_point = 0;
int distance = kMaxInt;
+
while (!Done()) {
+ int next_position;
+ switch (alignment) {
+ case STATEMENT_ALIGNED:
+ next_position = this->statement_position();
+ break;
+ case BREAK_POSITION_ALIGNED:
+ next_position = this->position();
+ break;
+ default:
+ UNREACHABLE();
+ next_position = this->statement_position();
+ }
    // Check if this break point is closer than what was previously found.
- if (position <= statement_position() &&
- statement_position() - position < distance) {
+ if (position <= next_position && next_position - position < distance) {
closest_break_point = break_point();
- distance = statement_position() - position;
+ distance = next_position - position;
      // Stop once we cannot get any closer.
if (distance == 0) break;
}
@@ -390,6 +403,20 @@ void BreakLocationIterator::ClearDebugBreak() {
}
+bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
+ if (RelocInfo::IsConstructCall(rmode())) {
+ return true;
+ } else if (RelocInfo::IsCodeTarget(rmode())) {
+ HandleScope scope(debug_info_->GetIsolate());
+ Address target = rinfo()->target_address();
+ Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+ return target_code->is_call_stub() || target_code->is_keyed_call_stub();
+ } else {
+ return false;
+ }
+}
+
+
void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
HandleScope scope(isolate);
@@ -606,7 +633,7 @@ const int Debug::kFrameDropperFrameSize = 4;
void ScriptCache::Add(Handle<Script> script) {
GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Create an entry in the hash map for the script.
- int id = Smi::cast(script->id())->value();
+ int id = script->id()->value();
HashMap::Entry* entry =
HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
if (entry->value != NULL) {
@@ -670,11 +697,11 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
// Find the location of the global handle.
Script** location =
- reinterpret_cast<Script**>(Utils::OpenHandle(**obj).location());
+ reinterpret_cast<Script**>(Utils::OpenPersistent(*obj).location());
ASSERT((*location)->IsScript());
// Remove the entry from the cache.
- int id = Smi::cast((*location)->id())->value();
+ int id = (*location)->id()->value();
script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
script_cache->collected_scripts_.Add(id);
@@ -1176,7 +1203,7 @@ void Debug::SetBreakPoint(Handle<JSFunction> function,
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(*source_position);
+ it.FindBreakLocationFromPosition(*source_position, STATEMENT_ALIGNED);
it.SetBreakPoint(break_point_object);
*source_position = it.position();
@@ -1188,7 +1215,8 @@ void Debug::SetBreakPoint(Handle<JSFunction> function,
bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
- int* source_position) {
+ int* source_position,
+ BreakPositionAlignment alignment) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
@@ -1219,7 +1247,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(position);
+ it.FindBreakLocationFromPosition(position, alignment);
it.SetBreakPoint(break_point_object);
*source_position = it.position() + shared->start_position();
@@ -1673,7 +1701,8 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared) {
+ Handle<SharedFunctionInfo> shared,
+ BreakPositionAlignment position_alignment) {
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
if (!HasDebugInfo(shared)) {
@@ -1691,7 +1720,20 @@ Handle<Object> Debug::GetSourceBreakLocations(
BreakPointInfo* break_point_info =
BreakPointInfo::cast(debug_info->break_points()->get(i));
if (break_point_info->GetBreakPointCount() > 0) {
- locations->set(count++, break_point_info->statement_position());
+ Smi* position;
+ switch (position_alignment) {
+ case STATEMENT_ALIGNED:
+ position = break_point_info->statement_position();
+ break;
+ case BREAK_POSITION_ALIGNED:
+ position = break_point_info->source_position();
+ break;
+ default:
+ UNREACHABLE();
+ position = break_point_info->statement_position();
+ }
+
+ locations->set(count++, position);
}
}
}
@@ -2046,13 +2088,30 @@ void Debug::PrepareForBreakPoints() {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
SharedFunctionInfo* shared = function->shared();
- if (shared->allows_lazy_compilation() &&
- shared->script()->IsScript() &&
- function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots() &&
- shared->code()->gc_metadata() != active_code_marker) {
+
+ if (!shared->allows_lazy_compilation()) continue;
+ if (!shared->script()->IsScript()) continue;
+ if (shared->code()->gc_metadata() == active_code_marker) continue;
+
+ Code::Kind kind = function->code()->kind();
+ if (kind == Code::FUNCTION &&
+ !function->code()->has_debug_break_slots()) {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
+ } else if (kind == Code::BUILTIN &&
+ (function->IsMarkedForInstallingRecompiledCode() ||
+ function->IsInRecompileQueue() ||
+ function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation())) {
+ // Abort in-flight compilation.
+ Code* shared_code = function->shared()->code();
+ if (shared_code->kind() == Code::FUNCTION &&
+ shared_code->has_debug_break_slots()) {
+ function->set_code(shared_code);
+ } else {
+ function->set_code(*lazy_compile);
+ function->shared()->set_code(*lazy_compile);
+ }
}
}
}
@@ -3066,13 +3125,14 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::String> fun_name =
v8::String::New("debugCommandProcessor");
v8::Local<v8::Function> fun =
- v8::Function::Cast(*api_exec_state->Get(fun_name));
+ v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
v8::Handle<v8::Boolean> running =
auto_continue ? v8::True() : v8::False();
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { running };
- cmd_processor = v8::Object::Cast(*fun->Call(api_exec_state, kArgc, argv));
+ cmd_processor = v8::Local<v8::Object>::Cast(
+ fun->Call(api_exec_state, kArgc, argv));
if (try_catch.HasCaught()) {
PrintLn(try_catch.Exception());
return;
@@ -3112,7 +3172,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::Value> request;
v8::TryCatch try_catch;
fun_name = v8::String::New("processDebugRequest");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+ fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
request = v8::String::New(command.text().start(),
command.text().length());
@@ -3125,7 +3185,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
if (!try_catch.HasCaught()) {
// Get response string.
if (!response_val->IsUndefined()) {
- response = v8::String::Cast(*response_val);
+ response = v8::Local<v8::String>::Cast(response_val);
} else {
response = v8::String::New("");
}
@@ -3138,7 +3198,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Get the running state.
fun_name = v8::String::New("isRunning");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+ fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { response };
v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv);
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 467acb93e8..67debc7543 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -79,6 +79,14 @@ enum BreakLocatorType {
};
+// The different types of breakpoint position alignments.
+// Must match Debug.BreakPositionAlignment in debug-debugger.js.
+enum BreakPositionAlignment {
+ STATEMENT_ALIGNED = 0,
+ BREAK_POSITION_ALIGNED = 1
+};
+
+
// Class for iterating through the break points in a function and changing
// them.
class BreakLocationIterator {
@@ -90,13 +98,15 @@ class BreakLocationIterator {
void Next();
void Next(int count);
void FindBreakLocationFromAddress(Address pc);
- void FindBreakLocationFromPosition(int position);
+ void FindBreakLocationFromPosition(int position,
+ BreakPositionAlignment alignment);
void Reset();
bool Done() const;
void SetBreakPoint(Handle<Object> break_point_object);
void ClearBreakPoint(Handle<Object> break_point_object);
void SetOneShot();
void ClearOneShot();
+ bool IsStepInLocation(Isolate* isolate);
void PrepareStepIn(Isolate* isolate);
bool IsExit() const;
bool HasBreakPoint();
@@ -240,7 +250,8 @@ class Debug {
int* source_position);
bool SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
- int* source_position);
+ int* source_position,
+ BreakPositionAlignment alignment);
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<JSFunction> function);
@@ -283,7 +294,8 @@ class Debug {
static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared);
+ Handle<SharedFunctionInfo> shared,
+      BreakPositionAlignment position_alignment);
// Getter for the debug_context.
inline Handle<Context> debug_context() { return debug_context_; }
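
The BreakPositionAlignment enum added above selects whether break positions
snap to statement starts or to exact break positions, with statement
alignment as the default wherever a caller omits the argument (see the
switches added to debug.cc). Reduced to a sketch:

    enum BreakPositionAlignment {
      STATEMENT_ALIGNED = 0,
      BREAK_POSITION_ALIGNED = 1
    };

    static int SelectPosition(int statement_position, int break_position,
                              BreakPositionAlignment alignment) {
      switch (alignment) {
        case STATEMENT_ALIGNED:      return statement_position;
        case BREAK_POSITION_ALIGNED: return break_position;
      }
      return statement_position;  // mirrors the UNREACHABLE() fallback
    }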
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 723d3f692e..f322e85b21 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -284,7 +284,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
- ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
+ Zone zone(isolate);
DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
@@ -293,11 +293,11 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
// Create a snapshot of the optimized functions list. This is needed because
// visitors might remove more than one link from the list at once.
- ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
+ ZoneList<JSFunction*> snapshot(1, &zone);
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
JSFunction* element_function = JSFunction::cast(element);
- snapshot.Add(element_function, isolate->runtime_zone());
+ snapshot.Add(element_function, &zone);
element = element_function->next_function_link();
}
@@ -420,11 +420,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Context* context = function->context()->native_context();
Isolate* isolate = context->GetIsolate();
Object* undefined = isolate->heap()->undefined_value();
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
- ZoneList<Code*> codes(1, zone);
+ Zone zone(isolate);
+ ZoneList<Code*> codes(1, &zone);
DeoptimizeWithMatchingCodeFilter filter(code);
- PartitionOptimizedFunctions(context, &filter, &codes, zone, undefined);
+ PartitionOptimizedFunctions(context, &filter, &codes, &zone, undefined);
ASSERT_EQ(1, codes.length());
DeoptimizeFunctionWithPreparedFunctionList(
JSFunction::cast(codes.at(0)->deoptimizing_functions()));
@@ -437,10 +436,9 @@ void Deoptimizer::DeoptimizeAllFunctionsForContext(
ASSERT(context->IsNativeContext());
Isolate* isolate = context->GetIsolate();
Object* undefined = isolate->heap()->undefined_value();
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
- ZoneList<Code*> codes(1, zone);
- PartitionOptimizedFunctions(context, filter, &codes, zone, undefined);
+ Zone zone(isolate);
+ ZoneList<Code*> codes(1, &zone);
+ PartitionOptimizedFunctions(context, filter, &codes, &zone, undefined);
for (int i = 0; i < codes.length(); ++i) {
DeoptimizeFunctionWithPreparedFunctionList(
JSFunction::cast(codes.at(i)->deoptimizing_functions()));
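
These deoptimizer.cc hunks replace "borrow the isolate's runtime zone and
guard it with a DELETE_ON_EXIT ZoneScope" with a Zone the function itself
owns on the stack, so the arena's destructor frees everything when the
operation returns. The ownership pattern in a self-contained sketch:

    #include <cstddef>
    #include <vector>

    class Arena {
     public:
      ~Arena() {
        for (size_t i = 0; i < segments_.size(); ++i) delete[] segments_[i];
      }
      void* Allocate(size_t size) {
        char* segment = new char[size];
        segments_.push_back(segment);
        return segment;
      }
     private:
      std::vector<char*> segments_;
    };

    void Operation() {
      Arena zone;  // lives exactly as long as this call
      void* scratch = zone.Allocate(128);
      (void)scratch;
    }  // ~Arena frees all scratch memory here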
@@ -534,8 +532,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
output_count_(0),
jsframe_count_(0),
output_(NULL),
- deferred_arguments_objects_values_(0),
- deferred_arguments_objects_(0),
+ deferred_objects_tagged_values_(0),
+ deferred_objects_double_values_(0),
+ deferred_objects_(0),
deferred_heap_numbers_(0),
trace_(false) {
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
@@ -546,6 +545,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
if (bailout_type_ == Deoptimizer::SOFT) {
+ isolate->counters()->soft_deopts_executed()->Increment();
// Soft deopts shouldn't count against the overall re-optimization count
// that can eventually lead to disabling optimization for a function.
int opt_count = function->shared()->opt_count();
@@ -714,6 +714,10 @@ void Deoptimizer::DoComputeOutputFrames() {
// Print some helpful diagnostic information.
int64_t start = OS::Ticks();
+ if (FLAG_log_timer_events &&
+ compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ LOG(isolate(), CodeDeoptEvent(compiled_code_));
+ }
if (trace_) {
PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
@@ -787,7 +791,6 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::DOUBLE_STACK_SLOT:
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
- case Translation::DUPLICATE:
default:
UNREACHABLE();
break;
@@ -1510,8 +1513,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
+ (output_frame_size - output_frame_offset) + kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -1566,15 +1569,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
- // Handlify all argument object values before triggering any allocation.
- List<Handle<Object> > values(deferred_arguments_objects_values_.length());
- for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
- values.Add(Handle<Object>(deferred_arguments_objects_values_[i],
- isolate_));
+ // Handlify all tagged object values before triggering any allocation.
+ List<Handle<Object> > values(deferred_objects_tagged_values_.length());
+ for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
+ values.Add(Handle<Object>(deferred_objects_tagged_values_[i], isolate_));
}
// Play it safe and clear all unhandlified values before we continue.
- deferred_arguments_objects_values_.Clear();
+ deferred_objects_tagged_values_.Clear();
// Materialize all heap numbers before looking at arguments because when the
// output frames are used to materialize arguments objects later on they need
@@ -1591,6 +1593,18 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
Memory::Object_at(d.slot_address()) = *num;
}
+ // Materialize all heap numbers required for arguments objects.
+ for (int i = 0; i < values.length(); i++) {
+ if (!values.at(i)->IsTheHole()) continue;
+ double double_value = deferred_objects_double_values_[i];
+ Handle<Object> num = isolate_->factory()->NewNumber(double_value);
+ if (trace_) {
+ PrintF("Materializing a new heap number %p [%e] for arguments object\n",
+ reinterpret_cast<void*>(*num), double_value);
+ }
+ values.Set(i, num);
+ }
+
// Materialize arguments objects one frame at a time.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
@@ -1599,9 +1613,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
Handle<JSObject> arguments;
for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
- ArgumentsObjectMaterializationDescriptor descriptor =
- deferred_arguments_objects_.RemoveLast();
- const int length = descriptor.arguments_length();
+ ObjectMaterializationDescriptor descriptor =
+ deferred_objects_.RemoveLast();
+ const int length = descriptor.object_length();
if (arguments.is_null()) {
if (frame->has_adapted_arguments()) {
// Use the arguments adapter frame we just built to materialize the
@@ -1695,7 +1709,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
#endif
-static const char* TraceValueType(bool is_smi, bool is_native) {
+static const char* TraceValueType(bool is_smi, bool is_native = false) {
if (is_native) {
return "native";
} else if (is_smi) {
@@ -1706,6 +1720,197 @@ static const char* TraceValueType(bool is_smi, bool is_native) {
}
+void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
+ int object_opcode,
+ int field_index) {
+ disasm::NameConverter converter;
+ Address object_slot = deferred_objects_.last().slot_address();
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+
+ switch (opcode) {
+ case Translation::BEGIN:
+ case Translation::JS_FRAME:
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
+ case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
+ case Translation::ARGUMENTS_OBJECT:
+ UNREACHABLE();
+ return;
+
+ case Translation::REGISTER: {
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("0x%08" V8PRIxPTR " ; %s ", input_value,
+ converter.NameOfCPURegister(input_reg));
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
+ }
+ AddObjectTaggedValue(input_value);
+ return;
+ }
+
+ case Translation::INT32_REGISTER: {
+ int input_reg = iterator->Next();
+ intptr_t value = input_->GetRegister(input_reg);
+ bool is_smi = Smi::IsValid(value);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; %s (%s)\n", value,
+ converter.NameOfCPURegister(input_reg),
+ TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<int32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::UINT32_REGISTER: {
+ int input_reg = iterator->Next();
+ uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; uint %s (%s)\n", value,
+ converter.NameOfCPURegister(input_reg),
+ TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<uint32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::DOUBLE_REGISTER: {
+ int input_reg = iterator->Next();
+ double value = input_->GetDoubleRegister(input_reg);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%e ; %s\n", value,
+ DoubleRegister::AllocationIndexToString(input_reg));
+ }
+ AddObjectDoubleValue(value);
+ return;
+ }
+
+ case Translation::STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ intptr_t input_value = input_->GetFrameSlot(input_offset);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
+ }
+ AddObjectTaggedValue(input_value);
+ return;
+ }
+
+ case Translation::INT32_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ intptr_t value = input_->GetFrameSlot(input_offset);
+ bool is_smi = Smi::IsValid(value);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; [sp + %d] (%s)\n",
+ value, input_offset, TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<int32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ uintptr_t value =
+ static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
+ value, input_offset, TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<uint32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::DOUBLE_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ double value = input_->GetDoubleFrameSlot(input_offset);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%e ; [sp + %d]\n", value, input_offset);
+ }
+ AddObjectDoubleValue(value);
+ return;
+ }
+
+ case Translation::LITERAL: {
+ Object* literal = ComputeLiteral(iterator->Next());
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ literal->ShortPrint();
+ PrintF(" ; literal\n");
+ }
+ intptr_t value = reinterpret_cast<intptr_t>(literal);
+ AddObjectTaggedValue(value);
+ return;
+ }
+ }
+}
+
+
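Editor's note: every scalar case in DoTranslateObject follows one rule — tag the value as a Smi if it fits, otherwise record it as a double and materialize a heap number later. A schematic of the tag-or-defer decision, assuming a 32-bit layout with one tag bit (hypothetical helpers, not V8's Smi class):

    #include <cstdint>

    const intptr_t kSmiMax = (static_cast<intptr_t>(1) << 30) - 1;
    const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << 30);

    bool FitsInSmi(intptr_t value) {
      return value >= kSmiMin && value <= kSmiMax;
    }

    // Smis carry tag bit 0; anything that does not fit becomes a deferred
    // double and, later, a freshly allocated heap number.
    intptr_t TagAsSmi(intptr_t value) { return value << 1; }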
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset,
@@ -1715,14 +1920,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
- // Ignore commands marked as duplicate and act on the first non-duplicate.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
- while (opcode == Translation::DUPLICATE) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
switch (opcode) {
case Translation::BEGIN:
@@ -1732,7 +1931,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
- case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -1758,7 +1956,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
intptr_t value = input_->GetRegister(input_reg);
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
Smi::IsValid(value);
-
if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
@@ -1836,8 +2033,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
@@ -1855,8 +2051,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
Smi::IsValid(value);
@@ -1888,8 +2083,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
@@ -1922,8 +2116,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
@@ -1954,31 +2147,24 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
case Translation::ARGUMENTS_OBJECT: {
- bool args_known = iterator->Next();
- int args_index = iterator->Next() + 1; // Skip receiver.
- int args_length = iterator->Next() - 1; // Skip receiver.
+ int length = iterator->Next();
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; %sarguments object\n", args_known ? "" : "dummy ");
+ PrintF(" ; arguments object (length = %d)\n", length);
}
// Use the arguments marker value as a sentinel and fill in the arguments
// object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
- AddArgumentsObject(
- output_[frame_index]->GetTop() + output_offset, args_length);
+ AddObjectStart(output_[frame_index]->GetTop() + output_offset, length);
output_[frame_index]->SetFrameSlot(output_offset, value);
- // We save the tagged argument values on the side and materialize the
- // actual arguments object after the deoptimized frame is built.
- for (int i = 0; i < args_length; i++) {
- unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
- intptr_t input_value = args_known
- ? input_->GetFrameSlot(input_offset)
- : reinterpret_cast<intptr_t>(isolate_->heap()->the_hole_value());
- AddArgumentsObjectValue(input_value);
+ // We save the argument values on the side and materialize the actual
+ // arguments object after the deoptimized frame is built.
+ for (int i = 0; i < length; i++) {
+ DoTranslateObject(iterator, Translation::ARGUMENTS_OBJECT, i);
}
return;
}
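Editor's note: the rewritten ARGUMENTS_OBJECT case is a two-phase protocol — write the arguments-marker sentinel into the output slot now, remember where it went, and only allocate the real JSObject once all output frames exist. Reduced to a sketch with hypothetical stand-in types:

    #include <cstdint>
    #include <vector>

    struct MiniFrame {
      std::vector<intptr_t> slots;
    };
    struct DeferredObject { intptr_t* slot; int length; };

    const intptr_t kArgumentsMarker = 1;  // stand-in sentinel value
    std::vector<DeferredObject> deferred_objects;

    void TranslateArgumentsObject(MiniFrame* frame, int offset, int length) {
      frame->slots[offset] = kArgumentsMarker;  // phase 1: sentinel
      deferred_objects.push_back({&frame->slots[offset], length});
      // Phase 2 happens after all frames are built: allocate the object
      // and overwrite *slot with the real pointer.
    }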
@@ -1998,10 +2184,6 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
- bool duplicate = (opcode == Translation::DUPLICATE);
- if (duplicate) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
switch (opcode) {
case Translation::BEGIN:
@@ -2011,21 +2193,20 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
- case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
- return false;
-
- case Translation::REGISTER: {
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- input_value,
- *input_offset);
- }
- output->SetRegister(output_reg, input_value);
- break;
- }
+ return false;
+
+ case Translation::REGISTER: {
+ int output_reg = iterator->Next();
+ if (FLAG_trace_osr) {
+ PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
+ converter.NameOfCPURegister(output_reg),
+ input_value,
+ *input_offset);
+ }
+ output->SetRegister(output_reg, input_value);
+ break;
+ }
case Translation::INT32_REGISTER: {
int32_t int32_value = 0;
@@ -2170,7 +2351,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
}
- if (!duplicate) *input_offset -= kPointerSize;
+ *input_offset -= kPointerSize;
return true;
}
@@ -2231,6 +2412,7 @@ void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
unoptimized_code->set_back_edges_patched_for_osr(false);
+ unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
#ifdef DEBUG
// Assert that none of the back edges are patched anymore.
Deoptimizer::VerifyInterruptCode(
@@ -2324,15 +2506,22 @@ Object* Deoptimizer::ComputeLiteral(int index) const {
}
-void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
- ArgumentsObjectMaterializationDescriptor object_desc(
- reinterpret_cast<Address>(slot_address), argc);
- deferred_arguments_objects_.Add(object_desc);
+void Deoptimizer::AddObjectStart(intptr_t slot_address, int length) {
+ ObjectMaterializationDescriptor object_desc(
+ reinterpret_cast<Address>(slot_address), length);
+ deferred_objects_.Add(object_desc);
}
-void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
- deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
+void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
+ deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
+ deferred_objects_double_values_.Add(isolate()->heap()->nan_value()->value());
+}
+
+
+void Deoptimizer::AddObjectDoubleValue(double value) {
+ deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
+ deferred_objects_double_values_.Add(value);
}
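Editor's note: AddObjectTaggedValue and AddObjectDoubleValue keep the two deferred lists index-aligned — a tagged entry is padded with NaN on the double side, a double entry with the hole on the tagged side, and MaterializeHeapObjects treats a hole as "allocate a heap number from the paired double". The invariant in miniature (hypothetical names, STL containers):

    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    const intptr_t kTheHole = 0;  // stand-in for the hole sentinel

    std::vector<intptr_t> tagged_values;  // index-aligned with double_values
    std::vector<double> double_values;

    void AddTagged(intptr_t v) {
      tagged_values.push_back(v);
      double_values.push_back(NAN);       // padding, never read
    }

    void AddDouble(double v) {
      tagged_values.push_back(kTheHole);  // "materialize me later" marker
      double_values.push_back(v);
    }

    bool NeedsHeapNumber(std::size_t i) { return tagged_values[i] == kTheHole; }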
@@ -2556,6 +2745,12 @@ void Translation::BeginCompiledStubFrame() {
}
+void Translation::BeginArgumentsObject(int args_length) {
+ buffer_->Add(ARGUMENTS_OBJECT, zone());
+ buffer_->Add(args_length, zone());
+}
+
+
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@@ -2620,17 +2815,11 @@ void Translation::StoreArgumentsObject(bool args_known,
}
-void Translation::MarkDuplicate() {
- buffer_->Add(DUPLICATE, zone());
-}
-
-
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
- case DUPLICATE:
- return 0;
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
+ case ARGUMENTS_OBJECT:
case REGISTER:
case INT32_REGISTER:
case UINT32_REGISTER:
@@ -2647,7 +2836,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case CONSTRUCT_STUB_FRAME:
return 2;
case JS_FRAME:
- case ARGUMENTS_OBJECT:
return 3;
}
UNREACHABLE();
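Editor's note: with DUPLICATE gone, the translation stream is uniform — an opcode followed by exactly NumberOfOperandsFor(opcode) operands, with ARGUMENTS_OBJECT now contributing a single length operand and its fields encoded as the commands that follow. Skipping a command therefore becomes mechanical; a sketch using the types already shown in this diff:

    // Consume one command without interpreting it.
    void SkipOneCommand(TranslationIterator* iterator) {
      Translation::Opcode opcode =
          static_cast<Translation::Opcode>(iterator->Next());
      for (int i = 0; i < Translation::NumberOfOperandsFor(opcode); ++i) {
        iterator->Next();  // discard operand
      }
    }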
@@ -2693,8 +2881,6 @@ const char* Translation::StringFor(Opcode opcode) {
return "LITERAL";
case ARGUMENTS_OBJECT:
return "ARGUMENTS_OBJECT";
- case DUPLICATE:
- return "DUPLICATE";
}
UNREACHABLE();
return "";
@@ -2746,7 +2932,6 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
- case Translation::DUPLICATE:
// We are at safepoint which corresponds to call. All registers are
// saved by caller so there would be no live registers at this
// point. Thus these translation commands should not be used.
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 5569f7ffd8..d28be236ed 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -75,17 +75,17 @@ class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
};
-class ArgumentsObjectMaterializationDescriptor BASE_EMBEDDED {
+class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
- ArgumentsObjectMaterializationDescriptor(Address slot_address, int argc)
- : slot_address_(slot_address), arguments_length_(argc) { }
+ ObjectMaterializationDescriptor(Address slot_address, int length)
+ : slot_address_(slot_address), object_length_(length) { }
Address slot_address() const { return slot_address_; }
- int arguments_length() const { return arguments_length_; }
+ int object_length() const { return object_length_; }
private:
Address slot_address_;
- int arguments_length_;
+ int object_length_;
};
@@ -369,6 +369,10 @@ class Deoptimizer : public Malloced {
void DoComputeCompiledStubFrame(TranslationIterator* iterator,
int frame_index);
+ void DoTranslateObject(TranslationIterator* iterator,
+ int object_opcode,
+ int field_index);
+
enum DeoptimizerTranslatedValueType {
TRANSLATED_VALUE_IS_NATIVE,
TRANSLATED_VALUE_IS_TAGGED
@@ -394,8 +398,9 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
- void AddArgumentsObject(intptr_t slot_address, int argc);
- void AddArgumentsObjectValue(intptr_t value);
+ void AddObjectStart(intptr_t slot_address, int argc);
+ void AddObjectTaggedValue(intptr_t value);
+ void AddObjectDoubleValue(double value);
void AddDoubleValue(intptr_t slot_address, double value);
static void GenerateDeoptimizationEntries(
@@ -446,8 +451,9 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
- List<Object*> deferred_arguments_objects_values_;
- List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
+ List<Object*> deferred_objects_tagged_values_;
+ List<double> deferred_objects_double_values_;
+ List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
#ifdef DEBUG
DisallowHeapAllocation* disallow_heap_allocation_;
@@ -698,6 +704,7 @@ class Translation BASE_EMBEDDED {
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
COMPILED_STUB_FRAME,
+ ARGUMENTS_OBJECT,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@@ -706,12 +713,7 @@ class Translation BASE_EMBEDDED {
INT32_STACK_SLOT,
UINT32_STACK_SLOT,
DOUBLE_STACK_SLOT,
- LITERAL,
- ARGUMENTS_OBJECT,
-
- // A prefix indicating that the next command is a duplicate of the one
- // that follows it.
- DUPLICATE
+ LITERAL
};
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
@@ -733,6 +735,7 @@ class Translation BASE_EMBEDDED {
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
+ void BeginArgumentsObject(int args_length);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
@@ -743,7 +746,6 @@ class Translation BASE_EMBEDDED {
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
- void MarkDuplicate();
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 3cdbf63e93..c6bf63d72f 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -253,7 +253,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
diff --git a/deps/v8/src/extensions/i18n/break-iterator.cc b/deps/v8/src/extensions/i18n/break-iterator.cc
new file mode 100644
index 0000000000..1225360fb7
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/break-iterator.cc
@@ -0,0 +1,331 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "break-iterator.h"
+
+#include <string.h>
+
+#include "i18n-utils.h"
+#include "unicode/brkiter.h"
+#include "unicode/locid.h"
+#include "unicode/rbbi.h"
+
+namespace v8_i18n {
+
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object>,
+ v8::Handle<v8::Value>);
+static icu::BreakIterator* InitializeBreakIterator(v8::Handle<v8::String>,
+ v8::Handle<v8::Object>,
+ v8::Handle<v8::Object>);
+static icu::BreakIterator* CreateICUBreakIterator(const icu::Locale&,
+ v8::Handle<v8::Object>);
+static void SetResolvedSettings(const icu::Locale&,
+ icu::BreakIterator*,
+ v8::Handle<v8::Object>);
+
+icu::BreakIterator* BreakIterator::UnpackBreakIterator(
+ v8::Handle<v8::Object> obj) {
+ v8::HandleScope handle_scope;
+
+ // v8::ObjectTemplate doesn't have a HasInstance method, so we can't check
+ // whether obj is an instance of the BreakIterator class. Instead we check
+ // for a property that has to be in the object. The same applies to other
+ // services, like Collator and DateTimeFormat.
+ if (obj->HasOwnProperty(v8::String::New("breakIterator"))) {
+ return static_cast<icu::BreakIterator*>(
+ obj->GetAlignedPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a break iterator.
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
+ delete UnpackBreakIterator(handle);
+
+ delete static_cast<icu::UnicodeString*>(
+ handle->GetAlignedPointerFromInternalField(1));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose(isolate);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("BreakIterator method called on an object "
+ "that is not a BreakIterator.")));
+}
+
+// Deletes the old value and sets the adopted text in corresponding
+// JavaScript object.
+icu::UnicodeString* ResetAdoptedText(
+ v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
+ // Get the previous value from the internal field.
+ icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
+ obj->GetAlignedPointerFromInternalField(1));
+ delete text;
+
+ // Assign new value to the internal pointer.
+ v8::String::Value text_value(value);
+ text = new icu::UnicodeString(
+ reinterpret_cast<const UChar*>(*text_value), text_value.length());
+ obj->SetAlignedPointerInInternalField(1, text);
+
+ // Return new unicode string pointer.
+ return text;
+}
+
+void BreakIterator::JSInternalBreakIteratorAdoptText(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New(
+ "Internal error. Iterator and text have to be specified.")));
+ return;
+ }
+
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
+ if (!break_iterator) {
+ ThrowUnexpectedObjectError();
+ return;
+ }
+
+ break_iterator->setText(*ResetAdoptedText(args[0]->ToObject(), args[1]));
+}
+
+void BreakIterator::JSInternalBreakIteratorFirst(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
+ if (!break_iterator) {
+ ThrowUnexpectedObjectError();
+ return;
+ }
+
+ args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->first()));
+}
+
+void BreakIterator::JSInternalBreakIteratorNext(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
+ if (!break_iterator) {
+ ThrowUnexpectedObjectError();
+ return;
+ }
+
+ args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->next()));
+}
+
+void BreakIterator::JSInternalBreakIteratorCurrent(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
+ if (!break_iterator) {
+ ThrowUnexpectedObjectError();
+ return;
+ }
+
+ args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->current()));
+}
+
+void BreakIterator::JSInternalBreakIteratorBreakType(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
+ if (!break_iterator) {
+ ThrowUnexpectedObjectError();
+ return;
+ }
+
+ // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+ icu::RuleBasedBreakIterator* rule_based_iterator =
+ static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+ int32_t status = rule_based_iterator->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ v8::Handle<v8::String> result;
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ result = v8::String::New("none");
+ } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ result = v8::String::New("number");
+ } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ result = v8::String::New("letter");
+ } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ result = v8::String::New("kana");
+ } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ result = v8::String::New("ideo");
+ } else {
+ result = v8::String::New("unknown");
+ }
+ args.GetReturnValue().Set(result);
+}
+
+void BreakIterator::JSCreateBreakIterator(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
+ !args[2]->IsObject()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, wrong parameters.")));
+ return;
+ }
+
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::ObjectTemplate> break_iterator_template =
+ Utils::GetTemplate2(isolate);
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object = break_iterator_template->NewInstance();
+ // The handle shouldn't normally be empty, but it can be if there was a
+ // stack overflow when creating the object.
+ if (local_object.IsEmpty()) {
+ args.GetReturnValue().Set(local_object);
+ return;
+ }
+
+ // Set break iterator as internal field of the resulting JS object.
+ icu::BreakIterator* break_iterator = InitializeBreakIterator(
+ args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
+
+ if (!break_iterator) {
+ v8::ThrowException(v8::Exception::Error(v8::String::New(
+ "Internal error. Couldn't create ICU break iterator.")));
+ return;
+ } else {
+ local_object->SetAlignedPointerInInternalField(0, break_iterator);
+ // Make sure that the pointer to adopted text is NULL.
+ local_object->SetAlignedPointerInInternalField(1, NULL);
+
+ v8::TryCatch try_catch;
+ local_object->Set(v8::String::New("breakIterator"),
+ v8::String::New("valid"));
+ if (try_catch.HasCaught()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, couldn't set property.")));
+ return;
+ }
+ }
+
+ v8::Persistent<v8::Object> wrapper(isolate, local_object);
+ // Make the object handle weak so we can delete the iterator once GC kicks in.
+ wrapper.MakeWeak<void>(NULL, &DeleteBreakIterator);
+ args.GetReturnValue().Set(wrapper);
+ wrapper.ClearAndLeak();
+}
+
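Editor's note: JSCreateBreakIterator ties the ICU object's lifetime to the JS wrapper — the pointer goes into an internal field, the persistent handle is made weak, and ClearAndLeak keeps the wrapper alive until GC fires the weak callback that frees the ICU side. The pattern in isolation, using the same 3.20-era V8 API the diff uses (NativeThing is a hypothetical payload):

    #include "v8.h"

    struct NativeThing {};  // hypothetical C++ payload

    static void DeleteNative(v8::Isolate* isolate,
                             v8::Persistent<v8::Object>* object,
                             void* param) {
      v8::HandleScope handle_scope(isolate);
      v8::Local<v8::Object> handle =
          v8::Local<v8::Object>::New(isolate, *object);
      delete static_cast<NativeThing*>(
          handle->GetAlignedPointerFromInternalField(0));  // free C++ side
      object->Dispose(isolate);  // drop the persistent handle
    }

    void Wrap(v8::Isolate* isolate, v8::Local<v8::Object> obj,
              NativeThing* native) {
      obj->SetAlignedPointerInInternalField(0, native);
      v8::Persistent<v8::Object> wrapper(isolate, obj);
      wrapper.MakeWeak<void>(NULL, &DeleteNative);  // GC calls DeleteNative
      wrapper.ClearAndLeak();  // wrapper lives as long as the JS object
    }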
+static icu::BreakIterator* InitializeBreakIterator(
+ v8::Handle<v8::String> locale,
+ v8::Handle<v8::Object> options,
+ v8::Handle<v8::Object> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::AsciiValue bcp47_locale(locale);
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::BreakIterator* break_iterator =
+ CreateICUBreakIterator(icu_locale, options);
+ if (!break_iterator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ break_iterator = CreateICUBreakIterator(no_extension_locale, options);
+
+ // Set resolved settings (locale).
+ SetResolvedSettings(no_extension_locale, break_iterator, resolved);
+ } else {
+ SetResolvedSettings(icu_locale, break_iterator, resolved);
+ }
+
+ return break_iterator;
+}
+
+static icu::BreakIterator* CreateICUBreakIterator(
+ const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::BreakIterator* break_iterator = NULL;
+ icu::UnicodeString type;
+ if (!Utils::ExtractStringSetting(options, "type", &type)) {
+ // The type has to be in the options; its absence is an internal error.
+ return NULL;
+ }
+
+ if (type == UNICODE_STRING_SIMPLE("character")) {
+ break_iterator =
+ icu::BreakIterator::createCharacterInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
+ break_iterator =
+ icu::BreakIterator::createSentenceInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("line")) {
+ break_iterator =
+ icu::BreakIterator::createLineInstance(icu_locale, status);
+ } else {
+ // Default is the word iterator.
+ break_iterator =
+ icu::BreakIterator::createWordInstance(icu_locale, status);
+ }
+
+ if (U_FAILURE(status)) {
+ delete break_iterator;
+ return NULL;
+ }
+
+ return break_iterator;
+}
+
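Editor's note: for reference, the iterator built above is driven exactly like plain ICU4C — create an instance for a locale, set the text, then walk first()/next() until DONE. A minimal standalone sketch (standard ICU4C API; error handling trimmed):

    #include <cstdio>
    #include "unicode/brkiter.h"
    #include "unicode/unistr.h"

    void PrintWordBreaks(const icu::Locale& locale,
                         const icu::UnicodeString& text) {
      UErrorCode status = U_ZERO_ERROR;
      icu::BreakIterator* it =
          icu::BreakIterator::createWordInstance(locale, status);
      if (U_FAILURE(status)) return;
      it->setText(text);
      for (int32_t pos = it->first(); pos != icu::BreakIterator::DONE;
           pos = it->next()) {
        std::printf("break at %d\n", pos);
      }
      delete it;
    }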
+static void SetResolvedSettings(const icu::Locale& icu_locale,
+ icu::BreakIterator* break_iterator,
+ v8::Handle<v8::Object> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+
+ // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ resolved->Set(v8::String::New("locale"), v8::String::New(result));
+ } else {
+ // This should never happen, since we got the locale from ICU.
+ resolved->Set(v8::String::New("locale"), v8::String::New("und"));
+ }
+}
+
+} // namespace v8_i18n
diff --git a/deps/v8/src/extensions/i18n/break-iterator.h b/deps/v8/src/extensions/i18n/break-iterator.h
new file mode 100644
index 0000000000..c44c20fbc8
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/break-iterator.h
@@ -0,0 +1,85 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
+#define V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class UnicodeString;
+}
+
+namespace v8_i18n {
+
+class BreakIterator {
+ public:
+ static void JSCreateBreakIterator(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks iterator object from corresponding JavaScript object.
+ static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the BreakIterator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteBreakIterator(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param);
+
+ // Assigns new text to the iterator.
+ static void JSInternalBreakIteratorAdoptText(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Moves the iterator to the beginning of the string and returns the new position.
+ static void JSInternalBreakIteratorFirst(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Moves the iterator to the next position and returns it.
+ static void JSInternalBreakIteratorNext(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Returns the iterator's current position.
+ static void JSInternalBreakIteratorCurrent(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Returns the type of the item at the current position.
+ // This call is only valid for word break iterators. Others just return 0.
+ static void JSInternalBreakIteratorBreakType(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ BreakIterator() {}
+};
+
+} // namespace v8_i18n
+
+#endif // V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
diff --git a/deps/v8/src/extensions/i18n/break-iterator.js b/deps/v8/src/extensions/i18n/break-iterator.js
new file mode 100644
index 0000000000..eefd8c2ab1
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/break-iterator.js
@@ -0,0 +1,197 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+/**
+ * Initializes the given object so it's a valid BreakIterator instance.
+ * Useful for subclassing.
+ */
+function initializeBreakIterator(iterator, locales, options) {
+ native function NativeJSCreateBreakIterator();
+
+ if (iterator.hasOwnProperty('__initializedIntlObject')) {
+ throw new TypeError('Trying to re-initialize v8BreakIterator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'breakiterator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'type', getOption(
+ 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
+
+ var locale = resolveLocale('breakiterator', locales, options);
+ var resolved = Object.defineProperties({}, {
+ requestedLocale: {value: locale.locale, writable: true},
+ type: {value: internalOptions.type, writable: true},
+ locale: {writable: true}
+ });
+
+ var internalIterator = NativeJSCreateBreakIterator(locale.locale,
+ internalOptions,
+ resolved);
+
+ Object.defineProperty(iterator, 'iterator', {value: internalIterator});
+ Object.defineProperty(iterator, 'resolved', {value: resolved});
+ Object.defineProperty(iterator, '__initializedIntlObject',
+ {value: 'breakiterator'});
+
+ return iterator;
+}
+
+
+/**
+ * Constructs Intl.v8BreakIterator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'v8BreakIterator', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.v8BreakIterator(locales, options);
+ }
+
+ return initializeBreakIterator(toObject(this), locales, options);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+
+
+/**
+ * BreakIterator resolvedOptions method.
+ */
+%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'breakiterator') {
+ throw new TypeError('resolvedOptions method called on a non-object or ' +
+ 'on an object that is not Intl.v8BreakIterator.');
+ }
+
+ var segmenter = this;
+ var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
+ segmenter.resolved.locale);
+
+ return {
+ locale: locale,
+ type: segmenter.resolved.type
+ };
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which the service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('breakiterator', locales, arguments[1]);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
+%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
+
+
+/**
+ * Adopts text to segment using the iterator. Old text, if present,
+ * gets discarded.
+ */
+function adoptText(iterator, text) {
+ native function NativeJSBreakIteratorAdoptText();
+ NativeJSBreakIteratorAdoptText(iterator.iterator, String(text));
+}
+
+
+/**
+ * Returns the index of the first break in the string and moves the current pointer.
+ */
+function first(iterator) {
+ native function NativeJSBreakIteratorFirst();
+ return NativeJSBreakIteratorFirst(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the next break and moves the pointer.
+ */
+function next(iterator) {
+ native function NativeJSBreakIteratorNext();
+ return NativeJSBreakIteratorNext(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the current break.
+ */
+function current(iterator) {
+ native function NativeJSBreakIteratorCurrent();
+ return NativeJSBreakIteratorCurrent(iterator.iterator);
+}
+
+
+/**
+ * Returns the type of the current break.
+ */
+function breakType(iterator) {
+ native function NativeJSBreakIteratorBreakType();
+ return NativeJSBreakIteratorBreakType(iterator.iterator);
+}
+
+
+addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
+addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
+addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
+addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
+addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
diff --git a/deps/v8/src/extensions/i18n/collator.cc b/deps/v8/src/extensions/i18n/collator.cc
new file mode 100644
index 0000000000..4ffa4145fb
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/collator.cc
@@ -0,0 +1,363 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "collator.h"
+
+#include "i18n-utils.h"
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/ucol.h"
+
+namespace v8_i18n {
+
+static icu::Collator* InitializeCollator(
+ v8::Handle<v8::String>, v8::Handle<v8::Object>, v8::Handle<v8::Object>);
+
+static icu::Collator* CreateICUCollator(
+ const icu::Locale&, v8::Handle<v8::Object>);
+
+static bool SetBooleanAttribute(
+ UColAttribute, const char*, v8::Handle<v8::Object>, icu::Collator*);
+
+static void SetResolvedSettings(
+ const icu::Locale&, icu::Collator*, v8::Handle<v8::Object>);
+
+static void SetBooleanSetting(
+ UColAttribute, icu::Collator*, const char*, v8::Handle<v8::Object>);
+
+icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
+ v8::HandleScope handle_scope;
+
+ if (obj->HasOwnProperty(v8::String::New("collator"))) {
+ return static_cast<icu::Collator*>(
+ obj->GetAlignedPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void Collator::DeleteCollator(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a collator.
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
+ delete UnpackCollator(handle);
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose(isolate);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Collator method called on an object "
+ "that is not a Collator.")));
+}
+
+// When there's an ICU error, throw a JavaScript error with |message|.
+static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
+ return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
+}
+
+// static
+void Collator::JSInternalCompare(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsObject() ||
+ !args[1]->IsString() || !args[2]->IsString()) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Collator and two string arguments are required.")));
+ return;
+ }
+
+ icu::Collator* collator = UnpackCollator(args[0]->ToObject());
+ if (!collator) {
+ ThrowUnexpectedObjectError();
+ return;
+ }
+
+ v8::String::Value string_value1(args[1]);
+ v8::String::Value string_value2(args[2]);
+ const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
+ const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
+ UErrorCode status = U_ZERO_ERROR;
+ UCollationResult result = collator->compare(
+ string1, string_value1.length(), string2, string_value2.length(), status);
+
+ if (U_FAILURE(status)) {
+ ThrowExceptionForICUError(
+ "Internal error. Unexpected failure in Collator.compare.");
+ return;
+ }
+
+ args.GetReturnValue().Set(result);
+}
+
+void Collator::JSCreateCollator(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
+ !args[2]->IsObject()) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Internal error, wrong parameters.")));
+ return;
+ }
+
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::ObjectTemplate> intl_collator_template =
+ Utils::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object = intl_collator_template->NewInstance();
+ // The handle shouldn't normally be empty, but it can be if there was a
+ // stack overflow when creating the object.
+ if (local_object.IsEmpty()) {
+ args.GetReturnValue().Set(local_object);
+ return;
+ }
+
+ // Set collator as internal field of the resulting JS object.
+ icu::Collator* collator = InitializeCollator(
+ args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
+
+ if (!collator) {
+ v8::ThrowException(v8::Exception::Error(v8::String::New(
+ "Internal error. Couldn't create ICU collator.")));
+ return;
+ } else {
+ local_object->SetAlignedPointerInInternalField(0, collator);
+
+ // Make it safer to unpack later on.
+ v8::TryCatch try_catch;
+ local_object->Set(v8::String::New("collator"), v8::String::New("valid"));
+ if (try_catch.HasCaught()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, couldn't set property.")));
+ return;
+ }
+ }
+
+ v8::Persistent<v8::Object> wrapper(isolate, local_object);
+ // Make the object handle weak so we can delete the collator once GC kicks in.
+ wrapper.MakeWeak<void>(NULL, &DeleteCollator);
+ args.GetReturnValue().Set(wrapper);
+ wrapper.ClearAndLeak();
+}
+
+static icu::Collator* InitializeCollator(v8::Handle<v8::String> locale,
+ v8::Handle<v8::Object> options,
+ v8::Handle<v8::Object> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::AsciiValue bcp47_locale(locale);
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::Collator* collator = CreateICUCollator(icu_locale, options);
+ if (!collator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ collator = CreateICUCollator(no_extension_locale, options);
+
+ // Set resolved settings (locale, collation options).
+ SetResolvedSettings(no_extension_locale, collator, resolved);
+ } else {
+ SetResolvedSettings(icu_locale, collator, resolved);
+ }
+
+ return collator;
+}
+
+static icu::Collator* CreateICUCollator(
+ const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
+ // Make collator from options.
+ icu::Collator* collator = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ collator = icu::Collator::createInstance(icu_locale, status);
+
+ if (U_FAILURE(status)) {
+ delete collator;
+ return NULL;
+ }
+
+ // Set flags first, and then override them with sensitivity if necessary.
+ SetBooleanAttribute(UCOL_NUMERIC_COLLATION, "numeric", options, collator);
+
+ // Normalization is always on, by the spec. We are free to optimize
+ // if the strings are already normalized (but we don't have a way to tell
+ // that right now).
+ collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
+
+ icu::UnicodeString case_first;
+ if (Utils::ExtractStringSetting(options, "caseFirst", &case_first)) {
+ if (case_first == UNICODE_STRING_SIMPLE("upper")) {
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
+ } else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
+ } else {
+ // Default (false/off).
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
+ }
+ }
+
+ icu::UnicodeString sensitivity;
+ if (Utils::ExtractStringSetting(options, "sensitivity", &sensitivity)) {
+ if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
+ collator->setStrength(icu::Collator::PRIMARY);
+ } else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
+ collator->setStrength(icu::Collator::SECONDARY);
+ } else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
+ collator->setStrength(icu::Collator::PRIMARY);
+ collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
+ } else {
+ // variant (default)
+ collator->setStrength(icu::Collator::TERTIARY);
+ }
+ }
+
+ bool ignore;
+ if (Utils::ExtractBooleanSetting(options, "ignorePunctuation", &ignore)) {
+ if (ignore) {
+ collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
+ }
+ }
+
+ return collator;
+}
+
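Editor's note: CreateICUCollator's sensitivity handling is the ECMA-402-to-ICU strength mapping — base to PRIMARY, accent to SECONDARY, case to PRIMARY plus the case level, variant to TERTIARY. A standalone sketch of that mapping feeding a comparison (standard ICU4C API; error handling trimmed):

    #include <cstring>
    #include "unicode/coll.h"
    #include "unicode/ucol.h"
    #include "unicode/unistr.h"

    // Returns UCOL_LESS (-1), UCOL_EQUAL (0) or UCOL_GREATER (1).
    UCollationResult CompareWithSensitivity(const icu::Locale& locale,
                                            const icu::UnicodeString& a,
                                            const icu::UnicodeString& b,
                                            const char* sensitivity) {
      UErrorCode status = U_ZERO_ERROR;
      icu::Collator* collator = icu::Collator::createInstance(locale, status);
      if (U_FAILURE(status)) return UCOL_EQUAL;
      if (std::strcmp(sensitivity, "base") == 0) {
        collator->setStrength(icu::Collator::PRIMARY);
      } else if (std::strcmp(sensitivity, "accent") == 0) {
        collator->setStrength(icu::Collator::SECONDARY);
      } else if (std::strcmp(sensitivity, "case") == 0) {
        collator->setStrength(icu::Collator::PRIMARY);
        collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
      } else {
        collator->setStrength(icu::Collator::TERTIARY);  // "variant" default
      }
      UCollationResult result = collator->compare(a, b, status);
      delete collator;
      return result;
    }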
+static bool SetBooleanAttribute(UColAttribute attribute,
+ const char* name,
+ v8::Handle<v8::Object> options,
+ icu::Collator* collator) {
+ UErrorCode status = U_ZERO_ERROR;
+ bool result;
+ if (Utils::ExtractBooleanSetting(options, name, &result)) {
+ collator->setAttribute(attribute, result ? UCOL_ON : UCOL_OFF, status);
+ if (U_FAILURE(status)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void SetResolvedSettings(const icu::Locale& icu_locale,
+ icu::Collator* collator,
+ v8::Handle<v8::Object> resolved) {
+ SetBooleanSetting(UCOL_NUMERIC_COLLATION, collator, "numeric", resolved);
+
+ UErrorCode status = U_ZERO_ERROR;
+
+ switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
+ case UCOL_LOWER_FIRST:
+ resolved->Set(v8::String::New("caseFirst"), v8::String::New("lower"));
+ break;
+ case UCOL_UPPER_FIRST:
+ resolved->Set(v8::String::New("caseFirst"), v8::String::New("upper"));
+ break;
+ default:
+ resolved->Set(v8::String::New("caseFirst"), v8::String::New("false"));
+ }
+
+ switch (collator->getAttribute(UCOL_STRENGTH, status)) {
+ case UCOL_PRIMARY: {
+ resolved->Set(v8::String::New("strength"), v8::String::New("primary"));
+
+      // Case level on at primary strength maps to 'case'; plain s1 is 'base'.
+ if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
+ resolved->Set(v8::String::New("sensitivity"), v8::String::New("case"));
+ } else {
+ resolved->Set(v8::String::New("sensitivity"), v8::String::New("base"));
+ }
+ break;
+ }
+ case UCOL_SECONDARY:
+ resolved->Set(v8::String::New("strength"), v8::String::New("secondary"));
+ resolved->Set(v8::String::New("sensitivity"), v8::String::New("accent"));
+ break;
+ case UCOL_TERTIARY:
+ resolved->Set(v8::String::New("strength"), v8::String::New("tertiary"));
+ resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
+ break;
+ case UCOL_QUATERNARY:
+      // We shouldn't get quaternary or identical from ICU, but if we do,
+      // map them to variant.
+ resolved->Set(v8::String::New("strength"), v8::String::New("quaternary"));
+ resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
+ break;
+ default:
+ resolved->Set(v8::String::New("strength"), v8::String::New("identical"));
+ resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
+ }
+
+ if (UCOL_SHIFTED == collator->getAttribute(UCOL_ALTERNATE_HANDLING, status)) {
+ resolved->Set(v8::String::New("ignorePunctuation"),
+ v8::Boolean::New(true));
+ } else {
+ resolved->Set(v8::String::New("ignorePunctuation"),
+ v8::Boolean::New(false));
+ }
+
+  // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ resolved->Set(v8::String::New("locale"), v8::String::New(result));
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ resolved->Set(v8::String::New("locale"), v8::String::New("und"));
+ }
+}
+
+static void SetBooleanSetting(UColAttribute attribute,
+ icu::Collator* collator,
+ const char* property,
+ v8::Handle<v8::Object> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+ if (UCOL_ON == collator->getAttribute(attribute, status)) {
+ resolved->Set(v8::String::New(property), v8::Boolean::New(true));
+ } else {
+ resolved->Set(v8::String::New(property), v8::Boolean::New(false));
+ }
+}
+
+} // namespace v8_i18n
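
Note on the mapping above: the sensitivity option travels one way into an ICU
strength (plus UCOL_CASE_LEVEL for 'case'), and SetResolvedSettings maps it
back for resolvedOptions(). A minimal sketch of the round trip, runnable in
any Intl-capable engine:

    // sensitivity 'case' = PRIMARY strength + case level on:
    // base letters and case are significant, accents are not.
    var c = new Intl.Collator('en', {sensitivity: 'case'});
    c.compare('a', '\u00E1');         // 0 -- accent difference ignored
    c.compare('a', 'A');              // non-zero -- case level distinguishes
    c.resolvedOptions().sensitivity;  // 'case'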
diff --git a/deps/v8/src/extensions/i18n/collator.h b/deps/v8/src/extensions/i18n/collator.h
new file mode 100644
index 0000000000..a3991b9ed2
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/collator.h
@@ -0,0 +1,68 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_COLLATOR_H_
+#define V8_EXTENSIONS_I18N_COLLATOR_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class Collator;
+class UnicodeString;
+}
+
+namespace v8_i18n {
+
+class Collator {
+ public:
+ static void JSCreateCollator(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks collator object from corresponding JavaScript object.
+ static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the Collator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteCollator(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param);
+
+  // Compares two strings and returns -1, 0 or 1 depending on
+  // whether string1 is less than, equal to or greater than string2.
+ static void JSInternalCompare(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ Collator() {}
+};
+
+} // namespace v8_i18n
+
+#endif // V8_EXTENSIONS_I18N_COLLATOR_H_
diff --git a/deps/v8/src/extensions/i18n/collator.js b/deps/v8/src/extensions/i18n/collator.js
new file mode 100644
index 0000000000..3483515bef
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/collator.js
@@ -0,0 +1,212 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+/**
+ * Initializes the given object so it's a valid Collator instance.
+ * Useful for subclassing.
+ */
+function initializeCollator(collator, locales, options) {
+ native function NativeJSCreateCollator();
+
+ if (collator.hasOwnProperty('__initializedIntlObject')) {
+ throw new TypeError('Trying to re-initialize Collator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'collator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'usage', getOption(
+ 'usage', 'string', ['sort', 'search'], 'sort'));
+
+ var sensitivity = getOption('sensitivity', 'string',
+ ['base', 'accent', 'case', 'variant']);
+ if (sensitivity === undefined && internalOptions.usage === 'sort') {
+ sensitivity = 'variant';
+ }
+ defineWEProperty(internalOptions, 'sensitivity', sensitivity);
+
+ defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
+ 'ignorePunctuation', 'boolean', undefined, false));
+
+ var locale = resolveLocale('collator', locales, options);
+
+ // ICU can't take kb, kc... parameters through localeID, so we need to pass
+ // them as options.
+ // One exception is -co- which has to be part of the extension, but only for
+ // usage: sort, and its value can't be 'standard' or 'search'.
+ var extensionMap = parseExtension(locale.extension);
+ setOptions(
+ options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
+
+ var collation = 'default';
+ var extension = '';
+ if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
+ if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
+ extension = '-u-co-' + extensionMap.co;
+ // ICU can't tell us what the collation is, so save user's input.
+ collation = extensionMap.co;
+ }
+ } else if (internalOptions.usage === 'search') {
+ extension = '-u-co-search';
+ }
+ defineWEProperty(internalOptions, 'collation', collation);
+
+ var requestedLocale = locale.locale + extension;
+
+  // We define all properties C++ code may produce, to prevent security
+  // problems. If a malicious user redefines Object.prototype.locale, we
+  // can't just use plain x.locale = 'us' in JS or Set("locale", "us") in
+  // C++. Object.defineProperties will either succeed in defining the
+  // properties or throw an error (see the sketch after this function).
+ var resolved = Object.defineProperties({}, {
+ caseFirst: {writable: true},
+ collation: {value: internalOptions.collation, writable: true},
+ ignorePunctuation: {writable: true},
+ locale: {writable: true},
+ numeric: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ sensitivity: {writable: true},
+ strength: {writable: true},
+ usage: {value: internalOptions.usage, writable: true}
+ });
+
+ var internalCollator = NativeJSCreateCollator(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // Writable, configurable and enumerable are set to false by default.
+ Object.defineProperty(collator, 'collator', {value: internalCollator});
+ Object.defineProperty(collator, '__initializedIntlObject',
+ {value: 'collator'});
+ Object.defineProperty(collator, 'resolved', {value: resolved});
+
+ return collator;
+}
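
A sketch of the failure mode the defineProperties pattern above avoids (the
attacker code is hypothetical, not part of this patch):

    // An accessor planted on Object.prototype intercepts plain assignment:
    Object.defineProperty(Object.prototype, 'locale', {
      set: function(v) { /* attacker observes or swallows the value */ },
      configurable: true
    });
    var unsafe = {};
    unsafe.locale = 'us';  // runs the attacker's setter, defines nothing
    // Pre-defining the own property up front bypasses the prototype chain:
    var safe = Object.defineProperties({}, {locale: {writable: true}});
    safe.locale = 'us';    // writes the own data property
    delete Object.prototype.locale;  // undo the planted accessor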
+
+
+/**
+ * Constructs Intl.Collator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'Collator', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.Collator(locales, options);
+ }
+
+ return initializeCollator(toObject(this), locales, options);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+
+
+/**
+ * Collator resolvedOptions method.
+ */
+%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'collator') {
+    throw new TypeError('resolvedOptions method called on a non-object ' +
+                        'or on an object that is not Intl.Collator.');
+ }
+
+ var coll = this;
+ var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
+ coll.resolved.locale);
+
+ return {
+ locale: locale,
+ usage: coll.resolved.usage,
+ sensitivity: coll.resolved.sensitivity,
+ ignorePunctuation: coll.resolved.ignorePunctuation,
+ numeric: coll.resolved.numeric,
+ caseFirst: coll.resolved.caseFirst,
+ collation: coll.resolved.collation
+ };
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
+%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('collator', locales, arguments[1]);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
+%SetNativeFlag(Intl.Collator.supportedLocalesOf);
+
+
+/**
+ * When the compare method is called with two arguments x and y, it returns a
+ * Number other than NaN that represents the result of a locale-sensitive
+ * String comparison of x with y.
+ * The result is intended to order String values in the sort order specified
+ * by the effective locale and collation options computed during construction
+ * of this Collator object, and will be negative, zero, or positive, depending
+ * on whether x comes before y in the sort order, the Strings are equal under
+ * the sort order, or x comes after y in the sort order, respectively.
+ */
+function compare(collator, x, y) {
+ native function NativeJSInternalCompare();
+ return NativeJSInternalCompare(collator.collator, String(x), String(y));
+}
+
+
+addBoundMethod(Intl.Collator, 'compare', compare, 2);
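
Since addBoundMethod pre-binds compare to its collator, the function can be
detached and handed straight to Array.prototype.sort. A usage sketch:

    var collator = new Intl.Collator('de', {sensitivity: 'base'});
    var words = ['Zebra', '\u00C4pfel', 'apfel'];
    words.sort(collator.compare);  // German ordering, e.g. apfel, Äpfel, Zebra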
diff --git a/deps/v8/src/extensions/i18n/date-format.cc b/deps/v8/src/extensions/i18n/date-format.cc
new file mode 100644
index 0000000000..1058e37a58
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/date-format.cc
@@ -0,0 +1,329 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "date-format.h"
+
+#include <string.h>
+
+#include "i18n-utils.h"
+#include "unicode/calendar.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/numsys.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/timezone.h"
+
+namespace v8_i18n {
+
+static icu::SimpleDateFormat* InitializeDateTimeFormat(v8::Handle<v8::String>,
+ v8::Handle<v8::Object>,
+ v8::Handle<v8::Object>);
+static icu::SimpleDateFormat* CreateICUDateFormat(const icu::Locale&,
+ v8::Handle<v8::Object>);
+static void SetResolvedSettings(const icu::Locale&,
+ icu::SimpleDateFormat*,
+ v8::Handle<v8::Object>);
+
+icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
+ v8::Handle<v8::Object> obj) {
+ v8::HandleScope handle_scope;
+
+ if (obj->HasOwnProperty(v8::String::New("dateFormat"))) {
+ return static_cast<icu::SimpleDateFormat*>(
+ obj->GetAlignedPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a date time formatter.
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
+ delete UnpackDateFormat(handle);
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose(isolate);
+}
+
+void DateFormat::JSInternalFormat(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ double millis = 0.0;
+ if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsDate()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New(
+ "Internal error. Formatter and date value have to be specified.")));
+ return;
+ } else {
+ millis = v8::Date::Cast(*args[1])->NumberValue();
+ }
+
+ icu::SimpleDateFormat* date_format = UnpackDateFormat(args[0]->ToObject());
+ if (!date_format) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("DateTimeFormat method called on an object "
+ "that is not a DateTimeFormat.")));
+ return;
+ }
+
+ icu::UnicodeString result;
+ date_format->format(millis, result);
+
+ args.GetReturnValue().Set(v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+}
+
+void DateFormat::JSInternalParse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ icu::UnicodeString string_date;
+ if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New(
+ "Internal error. Formatter and string have to be specified.")));
+ return;
+ } else {
+ if (!Utils::V8StringToUnicodeString(args[1], &string_date)) {
+ string_date = "";
+ }
+ }
+
+ icu::SimpleDateFormat* date_format = UnpackDateFormat(args[0]->ToObject());
+ if (!date_format) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("DateTimeFormat method called on an object "
+ "that is not a DateTimeFormat.")));
+ return;
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ UDate date = date_format->parse(string_date, status);
+ if (U_FAILURE(status)) {
+ return;
+ }
+
+ args.GetReturnValue().Set(v8::Date::New(static_cast<double>(date)));
+}
+
+void DateFormat::JSCreateDateTimeFormat(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 ||
+ !args[0]->IsString() ||
+ !args[1]->IsObject() ||
+ !args[2]->IsObject()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, wrong parameters.")));
+ return;
+ }
+
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::ObjectTemplate> date_format_template =
+ Utils::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object = date_format_template->NewInstance();
+  // The handle can be empty if there was a stack overflow while creating
+  // the object.
+ if (local_object.IsEmpty()) {
+ args.GetReturnValue().Set(local_object);
+ return;
+ }
+
+ // Set date time formatter as internal field of the resulting JS object.
+ icu::SimpleDateFormat* date_format = InitializeDateTimeFormat(
+ args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
+
+ if (!date_format) {
+ v8::ThrowException(v8::Exception::Error(v8::String::New(
+ "Internal error. Couldn't create ICU date time formatter.")));
+ return;
+ } else {
+ local_object->SetAlignedPointerInInternalField(0, date_format);
+
+ v8::TryCatch try_catch;
+ local_object->Set(v8::String::New("dateFormat"), v8::String::New("valid"));
+ if (try_catch.HasCaught()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, couldn't set property.")));
+ return;
+ }
+ }
+
+ v8::Persistent<v8::Object> wrapper(isolate, local_object);
+  // Make the object handle weak so we can delete the date formatter once
+  // GC kicks in.
+ wrapper.MakeWeak<void>(NULL, &DeleteDateFormat);
+ args.GetReturnValue().Set(wrapper);
+ wrapper.ClearAndLeak();
+}
+
+static icu::SimpleDateFormat* InitializeDateTimeFormat(
+ v8::Handle<v8::String> locale,
+ v8::Handle<v8::Object> options,
+ v8::Handle<v8::Object> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::AsciiValue bcp47_locale(locale);
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::SimpleDateFormat* date_format = CreateICUDateFormat(icu_locale, options);
+ if (!date_format) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ date_format = CreateICUDateFormat(no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system, calendar).
+ SetResolvedSettings(no_extension_locale, date_format, resolved);
+ } else {
+ SetResolvedSettings(icu_locale, date_format, resolved);
+ }
+
+ return date_format;
+}
+
+static icu::SimpleDateFormat* CreateICUDateFormat(
+ const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
+ // Create time zone as specified by the user. We have to re-create time zone
+ // since calendar takes ownership.
+ icu::TimeZone* tz = NULL;
+ icu::UnicodeString timezone;
+ if (Utils::ExtractStringSetting(options, "timeZone", &timezone)) {
+ tz = icu::TimeZone::createTimeZone(timezone);
+ } else {
+ tz = icu::TimeZone::createDefault();
+ }
+
+ // Create a calendar using locale, and apply time zone to it.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Calendar* calendar =
+ icu::Calendar::createInstance(tz, icu_locale, status);
+
+ // Make formatter from skeleton. Calendar and numbering system are added
+ // to the locale as Unicode extension (if they were specified at all).
+ icu::SimpleDateFormat* date_format = NULL;
+ icu::UnicodeString skeleton;
+ if (Utils::ExtractStringSetting(options, "skeleton", &skeleton)) {
+ icu::DateTimePatternGenerator* generator =
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+ icu::UnicodeString pattern;
+ if (U_SUCCESS(status)) {
+ pattern = generator->getBestPattern(skeleton, status);
+ delete generator;
+ }
+
+ date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+ if (U_SUCCESS(status)) {
+ date_format->adoptCalendar(calendar);
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete calendar;
+ delete date_format;
+ date_format = NULL;
+ }
+
+ return date_format;
+}
+
+static void SetResolvedSettings(const icu::Locale& icu_locale,
+ icu::SimpleDateFormat* date_format,
+ v8::Handle<v8::Object> resolved) {
+  UErrorCode status = U_ZERO_ERROR;
+
+  // Set pattern, time zone and calendar. Check for NULL before the first
+  // dereference: the caller passes the formatter through unchecked, and it
+  // can legitimately be NULL when creation failed.
+  if (date_format) {
+    icu::UnicodeString pattern;
+    date_format->toPattern(pattern);
+    resolved->Set(v8::String::New("pattern"),
+                  v8::String::New(reinterpret_cast<const uint16_t*>(
+                      pattern.getBuffer()), pattern.length()));
+
+ const icu::Calendar* calendar = date_format->getCalendar();
+ const char* calendar_name = calendar->getType();
+ resolved->Set(v8::String::New("calendar"), v8::String::New(calendar_name));
+
+ const icu::TimeZone& tz = calendar->getTimeZone();
+ icu::UnicodeString time_zone;
+ tz.getID(time_zone);
+
+ icu::UnicodeString canonical_time_zone;
+ icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
+ if (U_SUCCESS(status)) {
+ if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
+ resolved->Set(v8::String::New("timeZone"), v8::String::New("UTC"));
+ } else {
+ resolved->Set(v8::String::New("timeZone"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ canonical_time_zone.getBuffer()),
+ canonical_time_zone.length()));
+ }
+ }
+ }
+
+  // Ugly hack. ICU doesn't expose the numbering system in any way, so we
+  // have to assume that for a given locale the NumberingSystem constructor
+  // produces the same digits as NumberFormat/Calendar would.
+ status = U_ZERO_ERROR;
+ icu::NumberingSystem* numbering_system =
+ icu::NumberingSystem::createInstance(icu_locale, status);
+ if (U_SUCCESS(status)) {
+ const char* ns = numbering_system->getName();
+ resolved->Set(v8::String::New("numberingSystem"), v8::String::New(ns));
+ } else {
+ resolved->Set(v8::String::New("numberingSystem"), v8::Undefined());
+ }
+ delete numbering_system;
+
+  // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ resolved->Set(v8::String::New("locale"), v8::String::New(result));
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ resolved->Set(v8::String::New("locale"), v8::String::New("und"));
+ }
+}
+
+} // namespace v8_i18n
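
The skeleton-to-pattern step above is what makes the same option bag come out
in locale-appropriate order and punctuation. An illustrative sketch from the
JavaScript side (exact output depends on the ICU data in use):

    var opts = {year: 'numeric', month: 'long', day: 'numeric'};
    var when = new Date(2013, 6, 2);
    new Intl.DateTimeFormat('en-US', opts).format(when);  // e.g. 'July 2, 2013'
    new Intl.DateTimeFormat('de-DE', opts).format(when);  // e.g. '2. Juli 2013'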
diff --git a/deps/v8/src/extensions/i18n/date-format.h b/deps/v8/src/extensions/i18n/date-format.h
new file mode 100644
index 0000000000..daa5964e25
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/date-format.h
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_DATE_FORMAT_H_
+#define V8_EXTENSIONS_I18N_DATE_FORMAT_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class SimpleDateFormat;
+}
+
+namespace v8_i18n {
+
+class DateFormat {
+ public:
+ static void JSCreateDateTimeFormat(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks date format object from corresponding JavaScript object.
+ static icu::SimpleDateFormat* UnpackDateFormat(
+ v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the DateFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteDateFormat(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param);
+
+ // Formats date and returns corresponding string.
+ static void JSInternalFormat(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Parses date and returns corresponding Date object or undefined if parse
+ // failed.
+ static void JSInternalParse(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ DateFormat();
+};
+
+} // namespace v8_i18n
+
+#endif // V8_EXTENSIONS_I18N_DATE_FORMAT_H_
diff --git a/deps/v8/src/extensions/i18n/date-format.js b/deps/v8/src/extensions/i18n/date-format.js
new file mode 100644
index 0000000000..04e7a7c7b9
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/date-format.js
@@ -0,0 +1,478 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+/**
+ * Returns the LDML skeleton string corresponding to the options object.
+ */
+function toLDMLString(options) {
+ var getOption = getGetOption(options, 'dateformat');
+
+ var ldmlString = '';
+
+ var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
+
+ option = getOption('era', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
+
+ option = getOption('year', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
+
+ option = getOption('month', 'string',
+ ['2-digit', 'numeric', 'narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
+ 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
+
+ option = getOption('day', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(
+ option, {'2-digit': 'dd', 'numeric': 'd'});
+
+ var hr12 = getOption('hour12', 'boolean');
+ option = getOption('hour', 'string', ['2-digit', 'numeric']);
+ if (hr12 === undefined) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
+ } else if (hr12 === true) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
+ } else {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
+ }
+
+ option = getOption('minute', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
+
+ option = getOption('second', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
+
+ option = getOption('timeZoneName', 'string', ['short', 'long']);
+ ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
+
+ return ldmlString;
+}
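
A worked example of the concatenation above, read straight off the tables in
this function:

    // toLDMLString({weekday: 'short', year: 'numeric', month: '2-digit',
    //               hour: 'numeric', minute: '2-digit', hour12: true})
    // appends 'EEE' + 'y' + 'MM' + 'h' + 'mm':
    var skeleton = 'EEE' + 'y' + 'MM' + 'h' + 'mm';  // 'EEEyMMhmm'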
+
+
+/**
+ * Returns either LDML equivalent of the current option or empty string.
+ */
+function appendToLDMLString(option, pairs) {
+ if (option !== undefined) {
+ return pairs[option];
+ } else {
+ return '';
+ }
+}
+
+
+/**
+ * Returns an options object matching the given LDML pattern string.
+ */
+function fromLDMLString(ldmlString) {
+  // First remove ''-quoted literal text, so we drop strings like 'Uhr'.
+ ldmlString = ldmlString.replace(QUOTED_STRING_RE, '');
+
+ var options = {};
+ var match = ldmlString.match(/E{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
+
+ match = ldmlString.match(/G{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
+
+ match = ldmlString.match(/y{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'year', match, {y: 'numeric', yy: '2-digit'});
+
+ match = ldmlString.match(/M{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
+ M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
+
+  // Sometimes we get L instead of M for the month (standalone form).
+ match = ldmlString.match(/L{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
+ L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
+
+ match = ldmlString.match(/d{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'day', match, {d: 'numeric', dd: '2-digit'});
+
+ match = ldmlString.match(/h{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = true;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {h: 'numeric', hh: '2-digit'});
+
+ match = ldmlString.match(/H{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = false;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {H: 'numeric', HH: '2-digit'});
+
+ match = ldmlString.match(/m{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'minute', match, {m: 'numeric', mm: '2-digit'});
+
+ match = ldmlString.match(/s{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'second', match, {s: 'numeric', ss: '2-digit'});
+
+ match = ldmlString.match(/v{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'timeZoneName', match, {v: 'short', vv: 'long'});
+
+ return options;
+}
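
Tracing the regexes above over a typical resolved pattern (a sketch using
this internal helper; the 'at' literal is stripped by QUOTED_STRING_RE
first):

    // fromLDMLString("EEE, MMM d, y 'at' h:mm") yields roughly:
    // {weekday: 'short', month: 'short', day: 'numeric', year: 'numeric',
    //  hour12: true, hour: 'numeric', minute: '2-digit', ...}
    var parsed = fromLDMLString("EEE, MMM d, y 'at' h:mm");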
+
+
+function appendToDateTimeObject(options, option, match, pairs) {
+ if (match === null) {
+ if (!options.hasOwnProperty(option)) {
+ defineWEProperty(options, option, undefined);
+ }
+ return options;
+ }
+
+ var property = match[0];
+ defineWEProperty(options, option, pairs[property]);
+
+ return options;
+}
+
+
+/**
+ * Returns the options object, with defaults filled in where required.
+ */
+function toDateTimeOptions(options, required, defaults) {
+ if (options === undefined) {
+ options = null;
+ } else {
+ options = toObject(options);
+ }
+
+  // Work on a fresh object inheriting from the caller's options (per the
+  // ES 402 ToDateTimeOptions algorithm), so the defaults added below don't
+  // mutate the object we were given.
+  options = Object.create(options);
+
+ var needsDefault = true;
+ if ((required === 'date' || required === 'any') &&
+ (options.weekday !== undefined || options.year !== undefined ||
+ options.month !== undefined || options.day !== undefined)) {
+ needsDefault = false;
+ }
+
+ if ((required === 'time' || required === 'any') &&
+ (options.hour !== undefined || options.minute !== undefined ||
+ options.second !== undefined)) {
+ needsDefault = false;
+ }
+
+ if (needsDefault && (defaults === 'date' || defaults === 'all')) {
+ Object.defineProperty(options, 'year', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ Object.defineProperty(options, 'month', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ Object.defineProperty(options, 'day', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ if (needsDefault && (defaults === 'time' || defaults === 'all')) {
+ Object.defineProperty(options, 'hour', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ Object.defineProperty(options, 'minute', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ Object.defineProperty(options, 'second', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ return options;
+}
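
A quick trace under the ('any', 'date') arguments that
initializeDateTimeFormat passes below:

    var withDefaults = toDateTimeOptions(undefined, 'any', 'date');
    // -> year, month and day all default to 'numeric'
    var untouched = toDateTimeOptions({hour: 'numeric'}, 'any', 'date');
    // -> no defaults added; a time field was requested, so needsDefault
    //    is false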
+
+
+/**
+ * Initializes the given object so it's a valid DateTimeFormat instance.
+ * Useful for subclassing.
+ */
+function initializeDateTimeFormat(dateFormat, locales, options) {
+ native function NativeJSCreateDateTimeFormat();
+
+ if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new TypeError('Trying to re-initialize DateTimeFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var locale = resolveLocale('dateformat', locales, options);
+
+ options = toDateTimeOptions(options, 'any', 'date');
+
+ var getOption = getGetOption(options, 'dateformat');
+
+  // We implement only the best-fit algorithm, but still need to check
+  // that the formatMatcher value is in range.
+ var matcher = getOption('formatMatcher', 'string',
+ ['basic', 'best fit'], 'best fit');
+
+ // Build LDML string for the skeleton that we pass to the formatter.
+ var ldmlString = toLDMLString(options);
+
+ // Filter out supported extension keys so we know what to put in resolved
+ // section later on.
+ // We need to pass calendar and number system to the method.
+ var tz = canonicalizeTimeZoneID(options.timeZone);
+
+ // ICU prefers options to be passed using -u- extension key/values, so
+ // we need to build that.
+ var internalOptions = {};
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = Object.defineProperties({}, {
+ calendar: {writable: true},
+ day: {writable: true},
+ era: {writable: true},
+ hour12: {writable: true},
+ hour: {writable: true},
+ locale: {writable: true},
+ minute: {writable: true},
+ month: {writable: true},
+ numberingSystem: {writable: true},
+ pattern: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ second: {writable: true},
+ timeZone: {writable: true},
+ timeZoneName: {writable: true},
+ tz: {value: tz, writable: true},
+ weekday: {writable: true},
+ year: {writable: true}
+ });
+
+ var formatter = NativeJSCreateDateTimeFormat(
+ requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
+
+ if (tz !== undefined && tz !== resolved.timeZone) {
+ throw new RangeError('Unsupported time zone specified ' + tz);
+ }
+
+ Object.defineProperty(dateFormat, 'formatter', {value: formatter});
+ Object.defineProperty(dateFormat, 'resolved', {value: resolved});
+ Object.defineProperty(dateFormat, '__initializedIntlObject',
+ {value: 'dateformat'});
+
+ return dateFormat;
+}
+
+
+/**
+ * Constructs Intl.DateTimeFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'DateTimeFormat', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.DateTimeFormat(locales, options);
+ }
+
+ return initializeDateTimeFormat(toObject(this), locales, options);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+
+
+/**
+ * DateTimeFormat resolvedOptions method.
+ */
+%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'dateformat') {
+    throw new TypeError('resolvedOptions method called on a non-object or ' +
+        'on an object that is not Intl.DateTimeFormat.');
+ }
+
+ var format = this;
+ var fromPattern = fromLDMLString(format.resolved.pattern);
+ var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
+ if (userCalendar === undefined) {
+ // Use ICU name if we don't have a match. It shouldn't happen, but
+ // it would be too strict to throw for this.
+ userCalendar = format.resolved.calendar;
+ }
+
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ calendar: userCalendar,
+ timeZone: format.resolved.timeZone
+ };
+
+ addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
+ addWECPropertyIfDefined(result, 'era', fromPattern.era);
+ addWECPropertyIfDefined(result, 'year', fromPattern.year);
+ addWECPropertyIfDefined(result, 'month', fromPattern.month);
+ addWECPropertyIfDefined(result, 'day', fromPattern.day);
+ addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
+ addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
+ addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
+ addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
+ addWECPropertyIfDefined(result, 'second', fromPattern.second);
+
+ return result;
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('dateformat', locales, arguments[1]);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(date)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ */
+function formatDate(formatter, dateValue) {
+ native function NativeJSInternalDateFormat();
+
+ var dateMs;
+ if (dateValue === undefined) {
+ dateMs = Date.now();
+ } else {
+ dateMs = Number(dateValue);
+ }
+
+ if (!isFinite(dateMs)) {
+ throw new RangeError('Provided date is not in valid range.');
+ }
+
+ return NativeJSInternalDateFormat(formatter.formatter, new Date(dateMs));
+}
+
+
+/**
+ * Returns a Date object representing the result of calling ToString(value)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ * Returns undefined if date string cannot be parsed.
+ */
+function parseDate(formatter, value) {
+ native function NativeJSInternalDateParse();
+ return NativeJSInternalDateParse(formatter.formatter, String(value));
+}
+
+
+// 0 because date is optional argument.
+addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
+addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
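
Usage sketch for the two bound methods (v8Parse being the nonstandard
inverse of format):

    var df = new Intl.DateTimeFormat('en-US', {year: 'numeric', month: 'short'});
    df.format(new Date(2013, 6, 2));  // e.g. 'Jul 2013'
    df.format();                      // date argument optional: formats now
    df.v8Parse('Jul 2013');           // a Date, or undefined if parsing fails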
+
+
+/**
+ * Returns the canonical Area/Location name, or throws a RangeError if the
+ * zone name is not a valid IANA name.
+ */
+function canonicalizeTimeZoneID(tzID) {
+ // Skip undefined zones.
+ if (tzID === undefined) {
+ return tzID;
+ }
+
+ // Special case handling (UTC, GMT).
+ var upperID = tzID.toUpperCase();
+ if (upperID === 'UTC' || upperID === 'GMT' ||
+ upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
+ return 'UTC';
+ }
+
+ // We expect only _ and / beside ASCII letters.
+ // All inputs should conform to Area/Location from now on.
+ var match = TIMEZONE_NAME_CHECK_RE.exec(tzID);
+ if (match === null) {
+ throw new RangeError('Expected Area/Location for time zone, got ' + tzID);
+ }
+
+ var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
+ var i = 3;
+  while (i < match.length && match[i] !== undefined) {
+ result = result + '_' + toTitleCaseWord(match[i]);
+ i++;
+ }
+
+ return result;
+}
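
Tracing the function above (assuming toTitleCaseWord capitalizes the first
letter and lowercases the rest):

    canonicalizeTimeZoneID('utc');                  // 'UTC' (special case)
    canonicalizeTimeZoneID('america/los_angeles');  // 'America/Los_Angeles'
    canonicalizeTimeZoneID('GMT+1');                // throws RangeError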
diff --git a/deps/v8/src/extensions/i18n/footer.js b/deps/v8/src/extensions/i18n/footer.js
new file mode 100644
index 0000000000..ac33f1e242
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/footer.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+// Fix RegExp global state so we don't fail WebKit layout test:
+// fast/js/regexp-caching.html
+// It seems that the 'g' flag or test() operations leave the state changed.
+var CLEANUP_RE = new RegExp('');
+CLEANUP_RE.test('');
+
+return Intl;
+}());
diff --git a/deps/v8/src/extensions/i18n/globals.js b/deps/v8/src/extensions/i18n/globals.js
new file mode 100644
index 0000000000..68fabe777f
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/globals.js
@@ -0,0 +1,168 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * List of available services.
+ */
+var AVAILABLE_SERVICES = ['collator',
+ 'numberformat',
+ 'dateformat',
+ 'breakiterator'];
+
+/**
+ * Caches available locales for each service.
+ */
+var AVAILABLE_LOCALES = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformat': undefined,
+ 'breakiterator': undefined
+};
+
+/**
+ * Caches default ICU locale.
+ */
+var DEFAULT_ICU_LOCALE = undefined;
+
+/**
+ * Unicode extension regular expression.
+ */
+var UNICODE_EXTENSION_RE = new RegExp('-u(-[a-z0-9]{2,8})+', 'g');
+
+/**
+ * Matches any extension sequence (a singleton subtag and what follows it).
+ */
+var ANY_EXTENSION_RE = new RegExp('-[a-z0-9]{1}-.*', 'g');
+
+/**
+ * Replace quoted text (single quote, anything but the quote and quote again).
+ */
+var QUOTED_STRING_RE = new RegExp("'[^']+'", 'g');
+
+/**
+ * Matches valid service name.
+ */
+var SERVICE_RE =
+ new RegExp('^(collator|numberformat|dateformat|breakiterator)$');
+
+/**
+ * Validates a language tag against the BCP 47 spec.
+ * Actual value is assigned on first run.
+ */
+var LANGUAGE_TAG_RE = undefined;
+
+/**
+ * Helps find duplicate variants in the language tag.
+ */
+var LANGUAGE_VARIANT_RE = undefined;
+
+/**
+ * Helps find duplicate singletons in the language tag.
+ */
+var LANGUAGE_SINGLETON_RE = undefined;
+
+/**
+ * Matches valid IANA time zone names.
+ */
+var TIMEZONE_NAME_CHECK_RE =
+ new RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+
+/**
+ * Maps ICU calendar names into LDML type.
+ */
+var ICU_CALENDAR_MAP = {
+ 'gregorian': 'gregory',
+ 'japanese': 'japanese',
+ 'buddhist': 'buddhist',
+ 'roc': 'roc',
+ 'persian': 'persian',
+ 'islamic-civil': 'islamicc',
+ 'islamic': 'islamic',
+ 'hebrew': 'hebrew',
+ 'chinese': 'chinese',
+ 'indian': 'indian',
+ 'coptic': 'coptic',
+ 'ethiopic': 'ethiopic',
+ 'ethiopic-amete-alem': 'ethioaa'
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a collator.
+ */
+var COLLATOR_KEY_MAP = {
+ 'kn': {'property': 'numeric', 'type': 'boolean'},
+ 'kf': {'property': 'caseFirst', 'type': 'string',
+ 'values': ['false', 'lower', 'upper']}
+};
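
These key maps make a -u- extension in the language tag stand in for the
corresponding options. A sketch for the collator keys:

    // The two requests below should resolve the same way: 'kn' maps to
    // 'numeric' and 'kf' to 'caseFirst' per COLLATOR_KEY_MAP.
    new Intl.Collator('de-u-kn-true-kf-upper');
    new Intl.Collator('de', {numeric: true, caseFirst: 'upper'});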
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a number format.
+ */
+var NUMBER_FORMAT_KEY_MAP = {
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a date/time format.
+ */
+var DATETIME_FORMAT_KEY_MAP = {
+ 'ca': {'property': undefined, 'type': 'string'},
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Allowed -u-co- values. List taken from:
+ * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
+ */
+var ALLOWED_CO_VALUES = [
+ 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
+ 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
+];
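
Values outside this list are filtered out by initializeCollator, so the
collation silently falls back to 'default'. For example:

    new Intl.Collator('zh-u-co-pinyin').resolvedOptions().collation;
    // 'pinyin' -- the user's input is saved, since it is an allowed value
    new Intl.Collator('de-u-co-bogus').resolvedOptions().collation;
    // 'default' -- 'bogus' is not an allowed -u-co- value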
+
+/**
+ * Object attributes (configurable, writable, enumerable).
+ * To combine attributes, OR them.
+ * Values/names are copied from v8/include/v8.h:PropertyAttribute
+ */
+var ATTRIBUTES = {
+ 'NONE': 0,
+ 'READ_ONLY': 1,
+ 'DONT_ENUM': 2,
+ 'DONT_DELETE': 4
+};
+
+/**
+ * Error message used when a function that is not a constructor is
+ * invoked with new.
+ */
+var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
+ 'Function object that\'s not a constructor was created with new';
diff --git a/deps/v8/src/extensions/i18n/header.js b/deps/v8/src/extensions/i18n/header.js
new file mode 100644
index 0000000000..1c0a2d8874
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/header.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+/**
+ * The Intl object is a single object with named properties, all of
+ * which are constructors.
+ */
+var Intl = (function() {
+
+'use strict';
+
+var Intl = {};
diff --git a/deps/v8/src/extensions/i18n/i18n-extension.cc b/deps/v8/src/extensions/i18n/i18n-extension.cc
new file mode 100644
index 0000000000..eb7652eae8
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/i18n-extension.cc
@@ -0,0 +1,116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n-extension.h"
+
+#include "break-iterator.h"
+#include "collator.h"
+#include "date-format.h"
+#include "locale.h"
+#include "natives.h"
+#include "number-format.h"
+
+using v8::internal::I18NNatives;
+
+namespace v8_i18n {
+
+Extension::Extension()
+ : v8::Extension("v8/i18n",
+ reinterpret_cast<const char*>(
+ I18NNatives::GetScriptsSource().start()),
+ 0,
+ 0,
+ I18NNatives::GetScriptsSource().length()) {}
+
+v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
+ v8::Handle<v8::String> name) {
+  // Standalone helper methods.
+ if (name->Equals(v8::String::New("NativeJSCanonicalizeLanguageTag"))) {
+ return v8::FunctionTemplate::New(JSCanonicalizeLanguageTag);
+ } else if (name->Equals(v8::String::New("NativeJSAvailableLocalesOf"))) {
+ return v8::FunctionTemplate::New(JSAvailableLocalesOf);
+ } else if (name->Equals(v8::String::New("NativeJSGetDefaultICULocale"))) {
+ return v8::FunctionTemplate::New(JSGetDefaultICULocale);
+ } else if (name->Equals(v8::String::New("NativeJSGetLanguageTagVariants"))) {
+ return v8::FunctionTemplate::New(JSGetLanguageTagVariants);
+ }
+
+ // Date format and parse.
+ if (name->Equals(v8::String::New("NativeJSCreateDateTimeFormat"))) {
+ return v8::FunctionTemplate::New(DateFormat::JSCreateDateTimeFormat);
+ } else if (name->Equals(v8::String::New("NativeJSInternalDateFormat"))) {
+ return v8::FunctionTemplate::New(DateFormat::JSInternalFormat);
+ } else if (name->Equals(v8::String::New("NativeJSInternalDateParse"))) {
+ return v8::FunctionTemplate::New(DateFormat::JSInternalParse);
+ }
+
+ // Number format and parse.
+ if (name->Equals(v8::String::New("NativeJSCreateNumberFormat"))) {
+ return v8::FunctionTemplate::New(NumberFormat::JSCreateNumberFormat);
+ } else if (name->Equals(v8::String::New("NativeJSInternalNumberFormat"))) {
+ return v8::FunctionTemplate::New(NumberFormat::JSInternalFormat);
+ } else if (name->Equals(v8::String::New("NativeJSInternalNumberParse"))) {
+ return v8::FunctionTemplate::New(NumberFormat::JSInternalParse);
+ }
+
+ // Collator.
+ if (name->Equals(v8::String::New("NativeJSCreateCollator"))) {
+ return v8::FunctionTemplate::New(Collator::JSCreateCollator);
+ } else if (name->Equals(v8::String::New("NativeJSInternalCompare"))) {
+ return v8::FunctionTemplate::New(Collator::JSInternalCompare);
+ }
+
+ // Break iterator.
+ if (name->Equals(v8::String::New("NativeJSCreateBreakIterator"))) {
+ return v8::FunctionTemplate::New(BreakIterator::JSCreateBreakIterator);
+ } else if (name->Equals(v8::String::New("NativeJSBreakIteratorAdoptText"))) {
+ return v8::FunctionTemplate::New(
+ BreakIterator::JSInternalBreakIteratorAdoptText);
+ } else if (name->Equals(v8::String::New("NativeJSBreakIteratorFirst"))) {
+ return v8::FunctionTemplate::New(
+ BreakIterator::JSInternalBreakIteratorFirst);
+ } else if (name->Equals(v8::String::New("NativeJSBreakIteratorNext"))) {
+ return v8::FunctionTemplate::New(
+ BreakIterator::JSInternalBreakIteratorNext);
+ } else if (name->Equals(v8::String::New("NativeJSBreakIteratorCurrent"))) {
+ return v8::FunctionTemplate::New(
+ BreakIterator::JSInternalBreakIteratorCurrent);
+ } else if (name->Equals(v8::String::New("NativeJSBreakIteratorBreakType"))) {
+ return v8::FunctionTemplate::New(
+ BreakIterator::JSInternalBreakIteratorBreakType);
+ }
+
+ return v8::Handle<v8::FunctionTemplate>();
+}
+
+void Extension::Register() {
+ static Extension i18n_extension;
+ static v8::DeclareExtension extension_declaration(&i18n_extension);
+}
+
+} // namespace v8_i18n
diff --git a/deps/v8/src/extensions/i18n/i18n-extension.h b/deps/v8/src/extensions/i18n/i18n-extension.h
new file mode 100644
index 0000000000..050c336a67
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/i18n-extension.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
+#define V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8_i18n {
+
+class Extension : public v8::Extension {
+ public:
+ Extension();
+
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+
+ static void Register();
+
+ private:
+ static Extension* extension_;
+};
+
+} // namespace v8_i18n
+
+#endif // V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.cc b/deps/v8/src/extensions/i18n/i18n-utils.cc
new file mode 100644
index 0000000000..d8d3c12aff
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/i18n-utils.cc
@@ -0,0 +1,174 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n-utils.h"
+
+#include <string.h>
+
+#include "unicode/unistr.h"
+
+namespace v8_i18n {
+
+// static
+void Utils::StrNCopy(char* dest, int length, const char* src) {
+  if (!dest || !src || length <= 0) return;
+
+ strncpy(dest, src, length);
+ dest[length - 1] = '\0';
+}
+
+// static
+bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
+ icu::UnicodeString* output) {
+ v8::String::Utf8Value utf8_value(input);
+
+ if (*utf8_value == NULL) return false;
+
+ output->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
+
+ return true;
+}
+
+// static
+bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ icu::UnicodeString* result) {
+ if (!setting || !result) return false;
+
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+ if (try_catch.HasCaught()) {
+ return false;
+ }
+ // No need to check if |value| is empty because it's taken care of
+ // by TryCatch above.
+ if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
+ return V8StringToUnicodeString(value, result);
+ }
+ return false;
+}
+
+// static
+bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ int32_t* result) {
+ if (!setting || !result) return false;
+
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+ if (try_catch.HasCaught()) {
+ return false;
+ }
+ // No need to check if |value| is empty because it's taken care of
+ // by TryCatch above.
+ if (!value->IsUndefined() && !value->IsNull() && value->IsNumber()) {
+ *result = static_cast<int32_t>(value->Int32Value());
+ return true;
+ }
+ return false;
+}
+
+// static
+bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ bool* result) {
+ if (!setting || !result) return false;
+
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+ if (try_catch.HasCaught()) {
+ return false;
+ }
+ // No need to check if |value| is empty because it's taken care of
+ // by TryCatch above.
+ if (!value->IsUndefined() && !value->IsNull() && value->IsBoolean()) {
+ *result = static_cast<bool>(value->BooleanValue());
+ return true;
+ }
+ return false;
+}
+
+// static
+void Utils::AsciiToUChar(const char* source,
+ int32_t source_length,
+ UChar* target,
+ int32_t target_length) {
+ int32_t length =
+ source_length < target_length ? source_length : target_length;
+
+ if (length <= 0) {
+ return;
+ }
+
+ for (int32_t i = 0; i < length - 1; ++i) {
+ target[i] = static_cast<UChar>(source[i]);
+ }
+
+ target[length - 1] = 0x0u;
+}
+
+// static
+// Chrome on Linux doesn't like static initializers in classes, so we create
+// the template on demand.
+v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
+ static v8::Persistent<v8::ObjectTemplate> icu_template;
+
+ if (icu_template.IsEmpty()) {
+ v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
+
+ // Set aside internal field for ICU class.
+ raw_template->SetInternalFieldCount(1);
+
+ icu_template.Reset(isolate, raw_template);
+ }
+
+ return v8::Local<v8::ObjectTemplate>::New(isolate, icu_template);
+}
+
+// static
+// Chrome on Linux doesn't like static initializers in classes, so we create
+// the template on demand. This one has 2 internal fields.
+v8::Local<v8::ObjectTemplate> Utils::GetTemplate2(v8::Isolate* isolate) {
+ static v8::Persistent<v8::ObjectTemplate> icu_template_2;
+
+ if (icu_template_2.IsEmpty()) {
+ v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
+
+ // Set aside internal field for ICU class and additional data.
+ raw_template->SetInternalFieldCount(2);
+
+ icu_template_2.Reset(isolate, raw_template);
+ }
+
+ return v8::Local<v8::ObjectTemplate>::New(isolate, icu_template_2);
+}
+
+} // namespace v8_i18n
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.h b/deps/v8/src/extensions/i18n/i18n-utils.h
new file mode 100644
index 0000000000..db5d1b6ac0
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/i18n-utils.h
@@ -0,0 +1,91 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_SRC_UTILS_H_
+#define V8_EXTENSIONS_I18N_SRC_UTILS_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class UnicodeString;
+}
+
+namespace v8_i18n {
+
+class Utils {
+ public:
+ // Safe string copy. Null terminates the destination. Copies at most
+ // (length - 1) bytes.
+ // We can't use snprintf since it's not supported on all relevant platforms.
+  // We can't use OS::SNPrintF since it's only for internal code.
+ static void StrNCopy(char* dest, int length, const char* src);
+
+ // Converts v8::String into UnicodeString. Returns false if input
+  // can't be converted into UTF-8.
+ static bool V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
+ icu::UnicodeString* output);
+
+ // Extract a String setting named in |settings| and set it to |result|.
+ // Return true if it's specified. Otherwise, return false.
+ static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ icu::UnicodeString* result);
+
+  // Extract an Integer setting named in |settings| and set it to |result|.
+ // Return true if it's specified. Otherwise, return false.
+ static bool ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ int32_t* result);
+
+ // Extract a Boolean setting named in |settings| and set it to |result|.
+ // Return true if it's specified. Otherwise, return false.
+ static bool ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ bool* result);
+
+ // Converts ASCII array into UChar array.
+ // Target is always \0 terminated.
+ static void AsciiToUChar(const char* source,
+ int32_t source_length,
+ UChar* target,
+ int32_t target_length);
+
+ // Creates an ObjectTemplate with one internal field.
+ static v8::Local<v8::ObjectTemplate> GetTemplate(v8::Isolate* isolate);
+
+ // Creates an ObjectTemplate with two internal fields.
+ static v8::Local<v8::ObjectTemplate> GetTemplate2(v8::Isolate* isolate);
+
+ private:
+ Utils() {}
+};
+
+} // namespace v8_i18n
+
+#endif  // V8_EXTENSIONS_I18N_SRC_UTILS_H_
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.js b/deps/v8/src/extensions/i18n/i18n-utils.js
new file mode 100644
index 0000000000..d7e9486c50
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/i18n-utils.js
@@ -0,0 +1,541 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+/**
+ * Adds a bound method to the prototype of the given object.
+ */
+function addBoundMethod(obj, methodName, implementation, length) {
+ function getter() {
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject === undefined) {
+ throw new TypeError('Method ' + methodName + ' called on a ' +
+ 'non-object or on a wrong type of object.');
+ }
+ var internalName = '__bound' + methodName + '__';
+ if (this[internalName] === undefined) {
+ var that = this;
+ var boundMethod;
+ if (length === undefined || length === 2) {
+ boundMethod = function(x, y) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x, y);
+ }
+ } else if (length === 1) {
+ boundMethod = function(x) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x);
+ }
+ } else {
+ boundMethod = function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+          // DateTimeFormat.format needs to be a 0-arg method, but it can
+          // still receive an optional dateValue param. If one was provided,
+          // pass it along.
+ if (arguments.length > 0) {
+ return implementation(that, arguments[0]);
+ } else {
+ return implementation(that);
+ }
+ }
+ }
+ %FunctionSetName(boundMethod, internalName);
+ %FunctionRemovePrototype(boundMethod);
+ %SetNativeFlag(boundMethod);
+ this[internalName] = boundMethod;
+ }
+ return this[internalName];
+ }
+
+ %FunctionSetName(getter, methodName);
+ %FunctionRemovePrototype(getter);
+ %SetNativeFlag(getter);
+
+ Object.defineProperty(obj.prototype, methodName, {
+ get: getter,
+ enumerable: false,
+ configurable: true
+ });
+}
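+// Illustrative sketch of how the service files use this helper; the
+// implementation function name below is hypothetical:
+//   addBoundMethod(Intl.Collator, 'compare', doCompare, 2);
+//   (new Intl.Collator('de')).compare('a', 'b');  // Binds on first access.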
+
+
+/**
+ * Returns an intersection of locales and service supported locales.
+ * Parameter locales is treated as a priority list.
+ */
+function supportedLocalesOf(service, locales, options) {
+ if (service.match(SERVICE_RE) === null) {
+ throw new Error('Internal error, wrong service type: ' + service);
+ }
+
+  // Provide default options if none were specified.
+ if (options === undefined) {
+ options = {};
+ } else {
+ options = toObject(options);
+ }
+
+ var matcher = options.localeMatcher;
+ if (matcher !== undefined) {
+ matcher = String(matcher);
+ if (matcher !== 'lookup' && matcher !== 'best fit') {
+      throw new RangeError('Illegal value for localeMatcher: ' + matcher);
+ }
+ } else {
+ matcher = 'best fit';
+ }
+
+ var requestedLocales = initializeLocaleList(locales);
+
+  // Cache these; they never change for a given service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ // Use either best fit or lookup algorithm to match locales.
+ if (matcher === 'best fit') {
+ return initializeLocaleList(bestFitSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+ }
+
+ return initializeLocaleList(lookupSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+}
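+// Illustrative example (the result depends on ICU's locale data):
+//   supportedLocalesOf('collator', ['de-DE', 'tlh'])  // -> ['de-DE']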
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the BCP 47 Lookup algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
+ var matchedLocales = [];
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove -u- extension.
+ var locale = requestedLocales[i].replace(UNICODE_EXTENSION_RE, '');
+ do {
+ if (availableLocales[locale] !== undefined) {
+ // Push requested locale not the resolved one.
+ matchedLocales.push(requestedLocales[i]);
+ break;
+ }
+ // Truncate locale if possible, if not break.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ return matchedLocales;
+}
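+// Illustrative example of the fallback chain: a requested 'de-CH-1996' is
+// tried as 'de-CH-1996', then 'de-CH', then 'de'; if any of them is
+// available, the original 'de-CH-1996' is pushed into the result.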
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the implementation
+ * dependent algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupSupportedLocalesOf(requestedLocales, availableLocales);
+}
+
+
+/**
+ * Returns a getOption function that extracts the value of the named property
+ * from the given options object. If the property is missing it returns
+ * defaultValue. If the value is out of range for that property it throws
+ * RangeError.
+ */
+function getGetOption(options, caller) {
+ if (options === undefined) {
+ throw new Error('Internal ' + caller + ' error. ' +
+ 'Default options are missing.');
+ }
+
+ var getOption = function getOption(property, type, values, defaultValue) {
+ if (options[property] !== undefined) {
+ var value = options[property];
+ switch (type) {
+ case 'boolean':
+ value = Boolean(value);
+ break;
+ case 'string':
+ value = String(value);
+ break;
+ case 'number':
+ value = Number(value);
+ break;
+ default:
+ throw new Error('Internal error. Wrong value type.');
+ }
+ if (values !== undefined && values.indexOf(value) === -1) {
+ throw new RangeError('Value ' + value + ' out of range for ' + caller +
+ ' options property ' + property);
+ }
+
+ return value;
+ }
+
+ return defaultValue;
+ }
+
+ return getOption;
+}
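+// Illustrative example:
+//   var getOption = getGetOption({usage: 'search'}, 'Collator');
+//   getOption('usage', 'string', ['sort', 'search'], 'sort');  // 'search'
+//   getOption('numeric', 'boolean', undefined, false);         // false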
+
+
+/**
+ * Compares a BCP 47 language priority list requestedLocales against the locales
+ * in availableLocales and determines the best available language to meet the
+ * request. Two algorithms are available to match the locales: the Lookup
+ * algorithm described in RFC 4647 section 3.4, and an implementation dependent
+ * best-fit algorithm. Independent of the locale matching algorithm, options
+ * specified through Unicode locale extension sequences are negotiated
+ * separately, taking the caller's relevant extension keys and locale data as
+ * well as client-provided options into consideration. Returns an object with
+ * a locale property whose value is the language tag of the selected locale,
+ * and properties for each key in relevantExtensionKeys providing the selected
+ * value for that key.
+ */
+function resolveLocale(service, requestedLocales, options) {
+ requestedLocales = initializeLocaleList(requestedLocales);
+
+ var getOption = getGetOption(options, service);
+ var matcher = getOption('localeMatcher', 'string',
+ ['lookup', 'best fit'], 'best fit');
+ var resolved;
+ if (matcher === 'lookup') {
+ resolved = lookupMatcher(service, requestedLocales);
+ } else {
+ resolved = bestFitMatcher(service, requestedLocales);
+ }
+
+ return resolved;
+}
+
+
+/**
+ * Returns the best-matched supported locale and extension info using the
+ * basic lookup algorithm.
+ */
+function lookupMatcher(service, requestedLocales) {
+ native function NativeJSGetDefaultICULocale();
+
+ if (service.match(SERVICE_RE) === null) {
+ throw new Error('Internal error, wrong service type: ' + service);
+ }
+
+  // Cache these; they never change for a given service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove all extensions.
+ var locale = requestedLocales[i].replace(ANY_EXTENSION_RE, '');
+ do {
+ if (AVAILABLE_LOCALES[service][locale] !== undefined) {
+ // Return the resolved locale and extension.
+ var extensionMatch = requestedLocales[i].match(UNICODE_EXTENSION_RE);
+ var extension = (extensionMatch === null) ? '' : extensionMatch[0];
+ return {'locale': locale, 'extension': extension, 'position': i};
+ }
+ // Truncate locale if possible.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ // Didn't find a match, return default.
+ if (DEFAULT_ICU_LOCALE === undefined) {
+ DEFAULT_ICU_LOCALE = NativeJSGetDefaultICULocale();
+ }
+
+ return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
+}
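+// Illustrative result shape (assuming 'sr-RS' is an available locale):
+//   lookupMatcher('collator', ['sr-RS-u-co-standard']) ->
+//   {'locale': 'sr-RS', 'extension': '-u-co-standard', 'position': 0}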
+
+
+/**
+ * Returns the best-matched supported locale and extension info using an
+ * implementation-dependent algorithm.
+ */
+function bestFitMatcher(service, requestedLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupMatcher(service, requestedLocales);
+}
+
+
+/**
+ * Parses a Unicode extension into a key-value map.
+ * Returns empty object if the extension string is invalid.
+ * We are not concerned with the validity of the values at this point.
+ */
+function parseExtension(extension) {
+ var extensionSplit = extension.split('-');
+
+ // Assume ['', 'u', ...] input, but don't throw.
+ if (extensionSplit.length <= 2 ||
+      (extensionSplit[0] !== '' || extensionSplit[1] !== 'u')) {
+ return {};
+ }
+
+ // Key is {2}alphanum, value is {3,8}alphanum.
+ // Some keys may not have explicit values (booleans).
+ var extensionMap = {};
+ var previousKey = undefined;
+ for (var i = 2; i < extensionSplit.length; ++i) {
+ var length = extensionSplit[i].length;
+ var element = extensionSplit[i];
+ if (length === 2) {
+ extensionMap[element] = undefined;
+ previousKey = element;
+    } else if (length >= 3 && length <= 8 && previousKey !== undefined) {
+ extensionMap[previousKey] = element;
+ previousKey = undefined;
+ } else {
+ // There is a value that's too long, or that doesn't have a key.
+ return {};
+ }
+ }
+
+ return extensionMap;
+}
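+// Illustrative examples:
+//   parseExtension('-u-nu-thai-co-phonebk')  // {nu: 'thai', co: 'phonebk'}
+//   parseExtension('-u-kn')                  // {kn: undefined}
+//   parseExtension('not-an-extension')       // {}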
+
+
+/**
+ * Converts parameter to an Object if possible.
+ */
+function toObject(value) {
+ if (value === undefined || value === null) {
+ throw new TypeError('Value cannot be converted to an Object.');
+ }
+
+ return Object(value);
+}
+
+
+/**
+ * Populates the outOptions object with boolean key-value pairs taken
+ * from extensionMap and options.
+ * Returns the filtered extension (number and date format constructors use
+ * Unicode extensions for passing parameters to ICU).
+ * It's used for extension-option pairs only, e.g. kn-normalization, but not
+ * for 'sensitivity' since it doesn't have an extension equivalent.
+ * Extensions like nu and ca don't have an options equivalent, so we place
+ * undefined in map.property to denote that.
+ */
+function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
+ var extension = '';
+
+ var updateExtension = function updateExtension(key, value) {
+ return '-' + key + '-' + String(value);
+ }
+
+ var updateProperty = function updateProperty(property, type, value) {
+ if (type === 'boolean' && (typeof value === 'string')) {
+ value = (value === 'true') ? true : false;
+ }
+
+ if (property !== undefined) {
+ defineWEProperty(outOptions, property, value);
+ }
+ }
+
+ for (var key in keyValues) {
+ if (keyValues.hasOwnProperty(key)) {
+ var value = undefined;
+ var map = keyValues[key];
+ if (map.property !== undefined) {
+        // This may return true if the user specifies numeric: 'false', since
+ // Boolean('nonempty') === true.
+ value = getOption(map.property, map.type, map.values);
+ }
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ continue;
+ }
+ // User options didn't have it, check Unicode extension.
+ // Here we want to convert strings 'true', 'false' into proper Boolean
+ // values (not a user error).
+ if (extensionMap.hasOwnProperty(key)) {
+ value = extensionMap[key];
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ } else if (map.type === 'boolean') {
+ // Boolean keys are allowed not to have values in Unicode extension.
+ // Those default to true.
+ updateProperty(map.property, map.type, true);
+ extension += updateExtension(key, true);
+ }
+ }
+ }
+ }
+
+  return extension === '' ? '' : '-u' + extension;
+}
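+// Illustrative example, with getOption built from options {numeric: true}:
+//   setOptions({numeric: true}, {},
+//              {kn: {property: 'numeric', type: 'boolean'}},
+//              getOption, resolved)
+// returns '-u-kn-true' and defines resolved.numeric = true.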
+
+
+/**
+ * Converts all OwnProperties into
+ * configurable: false, writable: false, enumerable: true.
+ */
+function freezeArray(array) {
+ array.forEach(function(element, index) {
+ Object.defineProperty(array, index, {value: element,
+ configurable: false,
+ writable: false,
+ enumerable: true});
+ });
+
+ Object.defineProperty(array, 'length', {value: array.length,
+ writable: false});
+
+ return array;
+}
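+// Illustrative example: freezeArray(['en', 'ru']) still enumerates as
+// ['en', 'ru'], but writes to its elements or to its length are ignored.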
+
+
+/**
+ * It's sometimes desirable to keep the user-requested locale instead of the
+ * ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep the
+ * shorter one if that's what the user requested).
+ * This function returns the user-specified tag if its maximized form matches
+ * the ICU-resolved locale. If not, we return the ICU result.
+ */
+function getOptimalLanguageTag(original, resolved) {
+ // Returns Array<Object>, where each object has maximized and base properties.
+ // Maximized: zh -> zh-Hans-CN
+ // Base: zh-CN-u-ca-gregory -> zh-CN
+ native function NativeJSGetLanguageTagVariants();
+
+ // Take care of grandfathered or simple cases.
+ if (original === resolved) {
+ return original;
+ }
+
+ var locales = NativeJSGetLanguageTagVariants([original, resolved]);
+ if (locales[0].maximized !== locales[1].maximized) {
+ return resolved;
+ }
+
+ // Preserve extensions of resolved locale, but swap base tags with original.
+ var resolvedBase = new RegExp('^' + locales[1].base);
+ return resolved.replace(resolvedBase, locales[0].base);
+}
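+// Illustrative example: getOptimalLanguageTag('zh-TW', 'zh-Hant-TW') returns
+// 'zh-TW', since both tags maximize to 'zh-Hant-TW'.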
+
+
+/**
+ * Returns an Object that contains all of supported locales for a given
+ * service.
+ * In addition to the supported locales we add xx-ZZ locale for each xx-Yyyy-ZZ
+ * that is supported. This is required by the spec.
+ */
+function getAvailableLocalesOf(service) {
+ native function NativeJSAvailableLocalesOf();
+ var available = NativeJSAvailableLocalesOf(service);
+
+ for (var i in available) {
+ if (available.hasOwnProperty(i)) {
+ var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
+ if (parts !== null) {
+ // Build xx-ZZ. We don't care about the actual value,
+      // as long as it's not undefined.
+ available[parts[1] + '-' + parts[3]] = null;
+ }
+ }
+ }
+
+ return available;
+}
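+// Illustrative example: if ICU reports 'az-Latn-AZ' for a service, the
+// returned object also gets an 'az-AZ' key (with a dummy value).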
+
+
+/**
+ * Defines a property and sets writable and enumerable to true.
+ * Configurable is false by default.
+ */
+function defineWEProperty(object, property, value) {
+ Object.defineProperty(object, property,
+ {value: value, writable: true, enumerable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets configurable descriptor to false.
+ */
+function addWEPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWEProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Defines a property and sets writable, enumerable and configurable to true.
+ */
+function defineWECProperty(object, property, value) {
+ Object.defineProperty(object, property,
+ {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets all descriptors to true.
+ */
+function addWECPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWECProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Returns a titlecased word, e.g. aMeRicA -> America.
+ */
+function toTitleCaseWord(word) {
+ return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
+}
diff --git a/deps/v8/src/extensions/i18n/locale.cc b/deps/v8/src/extensions/i18n/locale.cc
new file mode 100644
index 0000000000..b32cc30b16
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/locale.cc
@@ -0,0 +1,248 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "locale.h"
+
+#include <string.h>
+
+#include "unicode/brkiter.h"
+#include "unicode/coll.h"
+#include "unicode/datefmt.h"
+#include "unicode/numfmt.h"
+#include "unicode/uloc.h"
+#include "unicode/uversion.h"
+
+namespace v8_i18n {
+
+void JSCanonicalizeLanguageTag(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // Expect locale id which is a string.
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale identifier, as a string, is required.")));
+ return;
+ }
+
+ UErrorCode error = U_ZERO_ERROR;
+
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+
+ // Return value which denotes invalid language tag.
+ const char* const kInvalidTag = "invalid-tag";
+
+ v8::String::AsciiValue locale_id(args[0]->ToString());
+ if (*locale_id == NULL) {
+ args.GetReturnValue().Set(v8::String::New(kInvalidTag));
+ return;
+ }
+
+ uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &error);
+ if (U_FAILURE(error) || icu_length == 0) {
+ args.GetReturnValue().Set(v8::String::New(kInvalidTag));
+ return;
+ }
+
+ char result[ULOC_FULLNAME_CAPACITY];
+
+ // Force strict BCP47 rules.
+ uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
+
+ if (U_FAILURE(error)) {
+ args.GetReturnValue().Set(v8::String::New(kInvalidTag));
+ return;
+ }
+
+ args.GetReturnValue().Set(v8::String::New(result));
+}
+
+void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // Expect service name which is a string.
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Service identifier, as a string, is required.")));
+ return;
+ }
+
+ const icu::Locale* available_locales = NULL;
+
+ int32_t count = 0;
+ v8::String::AsciiValue service(args[0]->ToString());
+ if (strcmp(*service, "collator") == 0) {
+ available_locales = icu::Collator::getAvailableLocales(count);
+ } else if (strcmp(*service, "numberformat") == 0) {
+ available_locales = icu::NumberFormat::getAvailableLocales(count);
+ } else if (strcmp(*service, "dateformat") == 0) {
+ available_locales = icu::DateFormat::getAvailableLocales(count);
+ } else if (strcmp(*service, "breakiterator") == 0) {
+ available_locales = icu::BreakIterator::getAvailableLocales(count);
+ }
+
+ v8::TryCatch try_catch;
+ UErrorCode error = U_ZERO_ERROR;
+ char result[ULOC_FULLNAME_CAPACITY];
+ v8::Handle<v8::Object> locales = v8::Object::New();
+
+ for (int32_t i = 0; i < count; ++i) {
+ const char* icu_name = available_locales[i].getName();
+
+ error = U_ZERO_ERROR;
+ // No need to force strict BCP47 rules.
+ uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+ if (U_FAILURE(error)) {
+      // This shouldn't happen, but let's not break the user.
+ continue;
+ }
+
+ // Index is just a dummy value for the property value.
+ locales->Set(v8::String::New(result), v8::Integer::New(i));
+ if (try_catch.HasCaught()) {
+ // Ignore error, but stop processing and return.
+ break;
+ }
+ }
+
+ args.GetReturnValue().Set(locales);
+}
+
+void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ icu::Locale default_locale;
+
+  // Convert ICU's default locale into a BCP47 language tag.
+ char result[ULOC_FULLNAME_CAPACITY];
+ UErrorCode status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ args.GetReturnValue().Set(v8::String::New(result));
+ return;
+ }
+
+ args.GetReturnValue().Set(v8::String::New("und"));
+}
+
+void JSGetLanguageTagVariants(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::TryCatch try_catch;
+
+ // Expect an array of strings.
+ if (args.Length() != 1 || !args[0]->IsArray()) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Internal error. Expected Array<String>.")));
+ return;
+ }
+
+ v8::Local<v8::Array> input = v8::Local<v8::Array>::Cast(args[0]);
+ v8::Handle<v8::Array> output = v8::Array::New(input->Length());
+ for (unsigned int i = 0; i < input->Length(); ++i) {
+ v8::Local<v8::Value> locale_id = input->Get(i);
+ if (try_catch.HasCaught()) {
+ break;
+ }
+
+ if (!locale_id->IsString()) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Internal error. Array element is missing "
+ "or it isn't a string.")));
+ return;
+ }
+
+ v8::String::AsciiValue ascii_locale_id(locale_id);
+ if (*ascii_locale_id == NULL) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Internal error. Non-ASCII locale identifier.")));
+ return;
+ }
+
+ UErrorCode error = U_ZERO_ERROR;
+
+ // Convert from BCP47 to ICU format.
+ // de-DE-u-co-phonebk -> de_DE@collation=phonebook
+ char icu_locale[ULOC_FULLNAME_CAPACITY];
+ int icu_locale_length = 0;
+ uloc_forLanguageTag(*ascii_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
+ &icu_locale_length, &error);
+ if (U_FAILURE(error) || icu_locale_length == 0) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Internal error. Failed to convert locale to ICU.")));
+ return;
+ }
+
+ // Maximize the locale.
+ // de_DE@collation=phonebook -> de_Latn_DE@collation=phonebook
+ char icu_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_addLikelySubtags(
+ icu_locale, icu_max_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Remove extensions from maximized locale.
+ // de_Latn_DE@collation=phonebook -> de_Latn_DE
+ char icu_base_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_getBaseName(
+ icu_max_locale, icu_base_max_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Get original name without extensions.
+ // de_DE@collation=phonebook -> de_DE
+ char icu_base_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_getBaseName(
+ icu_locale, icu_base_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Convert from ICU locale format to BCP47 format.
+ // de_Latn_DE -> de-Latn-DE
+ char base_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(icu_base_max_locale, base_max_locale,
+ ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+ // de_DE -> de-DE
+ char base_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(
+ icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+ if (U_FAILURE(error)) {
+ v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Internal error. Couldn't generate maximized "
+ "or base locale.")));
+ return;
+ }
+
+ v8::Handle<v8::Object> result = v8::Object::New();
+ result->Set(v8::String::New("maximized"), v8::String::New(base_max_locale));
+ result->Set(v8::String::New("base"), v8::String::New(base_locale));
+ if (try_catch.HasCaught()) {
+ break;
+ }
+
+ output->Set(i, result);
+ if (try_catch.HasCaught()) {
+ break;
+ }
+ }
+
+ args.GetReturnValue().Set(output);
+}
+
+} // namespace v8_i18n
diff --git a/deps/v8/src/extensions/i18n/locale.h b/deps/v8/src/extensions/i18n/locale.h
new file mode 100644
index 0000000000..c39568e5d9
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/locale.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_SRC_LOCALE_H_
+#define V8_EXTENSIONS_I18N_SRC_LOCALE_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace v8_i18n {
+
+// Canonicalizes the BCP47 language tag using BCP47 rules.
+// Returns 'invalid-tag' in case the input is not well formed.
+void JSCanonicalizeLanguageTag(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+// Returns a list of available locales for collator, date or number formatter.
+void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+// Returns default ICU locale.
+void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+// Returns an array of objects, that have maximized and base names of inputs.
+// Unicode extensions are dropped from both.
+// Input: ['zh-TW-u-nu-thai', 'sr']
+// Output: [{maximized: 'zh-Hant-TW', base: 'zh-TW'},
+// {maximized: 'sr-Cyrl-RS', base: 'sr'}]
+void JSGetLanguageTagVariants(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+} // namespace v8_i18n
+
+#endif  // V8_EXTENSIONS_I18N_SRC_LOCALE_H_
diff --git a/deps/v8/src/extensions/i18n/locale.js b/deps/v8/src/extensions/i18n/locale.js
new file mode 100644
index 0000000000..ea95b87192
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/locale.js
@@ -0,0 +1,192 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them together into one
+// Intl namespace.
+
+/**
+ * Canonicalizes the language tag, or throws in case the tag is invalid.
+ */
+function canonicalizeLanguageTag(localeID) {
+ native function NativeJSCanonicalizeLanguageTag();
+
+  // typeof null is 'object', so we have to do an extra check.
+ if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
+ localeID === null) {
+ throw new TypeError('Language ID should be string or object.');
+ }
+
+ var localeString = String(localeID);
+
+ if (isValidLanguageTag(localeString) === false) {
+ throw new RangeError('Invalid language tag: ' + localeString);
+ }
+
+ // This call will strip -kn but not -kn-true extensions.
+  // ICU bug filed: http://bugs.icu-project.org/trac/ticket/9265.
+ // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
+ // upgrade to ICU 4.9.
+ var tag = NativeJSCanonicalizeLanguageTag(localeString);
+ if (tag === 'invalid-tag') {
+ throw new RangeError('Invalid language tag: ' + localeString);
+ }
+
+ return tag;
+}
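+// Illustrative examples:
+//   canonicalizeLanguageTag('en-us')     // 'en-US'
+//   canonicalizeLanguageTag('not a tag') // throws RangeError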
+
+
+/**
+ * Returns an array where all locales are canonicalized and duplicates removed.
+ * Throws on locales that are not well formed BCP47 tags.
+ */
+function initializeLocaleList(locales) {
+ var seen = [];
+ if (locales === undefined) {
+ // Constructor is called without arguments.
+ seen = [];
+ } else {
+ // We allow single string localeID.
+ if (typeof locales === 'string') {
+ seen.push(canonicalizeLanguageTag(locales));
+ return freezeArray(seen);
+ }
+
+ var o = toObject(locales);
+      // Converts it to UInt32 (>>> is shift-right on 32-bit integers).
+ var len = o.length >>> 0;
+
+ for (var k = 0; k < len; k++) {
+ if (k in o) {
+ var value = o[k];
+
+ var tag = canonicalizeLanguageTag(value);
+
+ if (seen.indexOf(tag) === -1) {
+ seen.push(tag);
+ }
+ }
+ }
+ }
+
+ return freezeArray(seen);
+}
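+// Illustrative example (duplicates collapse after canonicalization):
+//   initializeLocaleList(['en-us', 'EN-US', 'sr'])  // frozen ['en-US', 'sr']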
+
+
+/**
+ * Validates the language tag. Section 2.2.9 of the BCP 47 spec
+ * defines a valid tag.
+ *
+ * ICU is too permissive and lets invalid tags, like
+ * hant-cmn-cn, through.
+ *
+ * Returns false if the language tag is invalid.
+ */
+function isValidLanguageTag(locale) {
+  // Check if it's well-formed, including grandfathered tags.
+ if (LANGUAGE_TAG_RE.test(locale) === false) {
+ return false;
+ }
+
+  // Just return if it's an x- form. It's all private.
+ if (locale.indexOf('x-') === 0) {
+ return true;
+ }
+
+ // Check if there are any duplicate variants or singletons (extensions).
+
+ // Remove private use section.
+ locale = locale.split(/-x-/)[0];
+
+ // Skip language since it can match variant regex, so we start from 1.
+ // We are matching i-klingon here, but that's ok, since i-klingon-klingon
+ // is not valid and would fail LANGUAGE_TAG_RE test.
+ var variants = [];
+ var extensions = [];
+ var parts = locale.split(/-/);
+ for (var i = 1; i < parts.length; i++) {
+ var value = parts[i];
+ if (LANGUAGE_VARIANT_RE.test(value) === true && extensions.length === 0) {
+ if (variants.indexOf(value) === -1) {
+ variants.push(value);
+ } else {
+ return false;
+ }
+ }
+
+ if (LANGUAGE_SINGLETON_RE.test(value) === true) {
+ if (extensions.indexOf(value) === -1) {
+ extensions.push(value);
+ } else {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
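+// Illustrative examples:
+//   isValidLanguageTag('de-DE-u-co-phonebk')         // true
+//   isValidLanguageTag('en-u-ca-gregory-u-nu-thai')  // false, duplicate 'u'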
+
+
+/**
+ * Builds a regular expression that validates the language tag
+ * against the BCP 47 spec.
+ * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
+ * Runs on load and initializes the global REs.
+ */
+(function() {
+ var alpha = '[a-zA-Z]';
+ var digit = '[0-9]';
+ var alphanum = '(' + alpha + '|' + digit + ')';
+ var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
+ 'zh-min|zh-min-nan|zh-xiang)';
+ var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
+ 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
+ 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
+ var grandfathered = '(' + irregular + '|' + regular + ')';
+ var privateUse = '(x(-' + alphanum + '{1,8})+)';
+
+ var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
+ LANGUAGE_SINGLETON_RE = new RegExp('^' + singleton + '$', 'i');
+
+ var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
+
+ var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
+ LANGUAGE_VARIANT_RE = new RegExp('^' + variant + '$', 'i');
+
+ var region = '(' + alpha + '{2}|' + digit + '{3})';
+ var script = '(' + alpha + '{4})';
+ var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
+ var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
+ alpha + '{5,8})';
+ var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
+ variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
+
+ var languageTag =
+ '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
+ LANGUAGE_TAG_RE = new RegExp(languageTag, 'i');
+})();
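+// Illustrative checks against the generated expression: LANGUAGE_TAG_RE
+// accepts tags like 'de-CH-1996' and 'x-private', and rejects 'hant-cmn-cn'.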
diff --git a/deps/v8/src/extensions/i18n/number-format.cc b/deps/v8/src/extensions/i18n/number-format.cc
new file mode 100644
index 0000000000..2240b0846b
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/number-format.cc
@@ -0,0 +1,418 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "number-format.h"
+
+#include <string.h>
+
+#include "i18n-utils.h"
+#include "unicode/curramt.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
+#include "unicode/uchar.h"
+#include "unicode/ucurr.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+
+namespace v8_i18n {
+
+static icu::DecimalFormat* InitializeNumberFormat(v8::Handle<v8::String>,
+ v8::Handle<v8::Object>,
+ v8::Handle<v8::Object>);
+static icu::DecimalFormat* CreateICUNumberFormat(const icu::Locale&,
+ v8::Handle<v8::Object>);
+static void SetResolvedSettings(const icu::Locale&,
+ icu::DecimalFormat*,
+ v8::Handle<v8::Object>);
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+ v8::Handle<v8::Object> obj) {
+ v8::HandleScope handle_scope;
+
+ // v8::ObjectTemplate doesn't have HasInstance method so we can't check
+ // if obj is an instance of NumberFormat class. We'll check for a property
+ // that has to be in the object. The same applies to other services, like
+ // Collator and DateTimeFormat.
+ if (obj->HasOwnProperty(v8::String::New("numberFormat"))) {
+ return static_cast<icu::DecimalFormat*>(
+ obj->GetAlignedPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+  // pointing to a number formatter.
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
+ delete UnpackNumberFormat(handle);
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose(isolate);
+}
+
+void NumberFormat::JSInternalFormat(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsNumber()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Formatter and numeric value have to be specified.")));
+ return;
+ }
+
+ icu::DecimalFormat* number_format = UnpackNumberFormat(args[0]->ToObject());
+ if (!number_format) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("NumberFormat method called on an object "
+ "that is not a NumberFormat.")));
+ return;
+ }
+
+ // ICU will handle actual NaN value properly and return NaN string.
+ icu::UnicodeString result;
+ number_format->format(args[1]->NumberValue(), result);
+
+ args.GetReturnValue().Set(v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+}
+
+void NumberFormat::JSInternalParse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Formatter and string have to be specified.")));
+ return;
+ }
+
+ icu::DecimalFormat* number_format = UnpackNumberFormat(args[0]->ToObject());
+ if (!number_format) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("NumberFormat method called on an object "
+ "that is not a NumberFormat.")));
+ return;
+ }
+
+  // Convert the input string into an ICU UnicodeString before parsing.
+ icu::UnicodeString string_number;
+ if (!Utils::V8StringToUnicodeString(args[1]->ToString(), &string_number)) {
+ string_number = "";
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Formattable result;
+ // ICU 4.6 doesn't support parseCurrency call. We need to wait for ICU49
+ // to be part of Chrome.
+ // TODO(cira): Include currency parsing code using parseCurrency call.
+ // We need to check if the formatter parses all currencies or only the
+ // one it was constructed with (it will impact the API - how to return ISO
+ // code and the value).
+ number_format->parse(string_number, result, status);
+ if (U_FAILURE(status)) {
+ return;
+ }
+
+ switch (result.getType()) {
+ case icu::Formattable::kDouble:
+ args.GetReturnValue().Set(result.getDouble());
+ return;
+ case icu::Formattable::kLong:
+ args.GetReturnValue().Set(v8::Number::New(result.getLong()));
+ return;
+ case icu::Formattable::kInt64:
+ args.GetReturnValue().Set(v8::Number::New(result.getInt64()));
+ return;
+ default:
+ return;
+ }
+}
+
+void NumberFormat::JSCreateNumberFormat(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 ||
+ !args[0]->IsString() ||
+ !args[1]->IsObject() ||
+ !args[2]->IsObject()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, wrong parameters.")));
+ return;
+ }
+
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::ObjectTemplate> number_format_template =
+ Utils::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object = number_format_template->NewInstance();
+ // But the handle shouldn't be empty.
+ // That can happen if there was a stack overflow when creating the object.
+ if (local_object.IsEmpty()) {
+ args.GetReturnValue().Set(local_object);
+ return;
+ }
+
+ // Set number formatter as internal field of the resulting JS object.
+ icu::DecimalFormat* number_format = InitializeNumberFormat(
+ args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
+
+ if (!number_format) {
+ v8::ThrowException(v8::Exception::Error(v8::String::New(
+ "Internal error. Couldn't create ICU number formatter.")));
+ return;
+ } else {
+ local_object->SetAlignedPointerInInternalField(0, number_format);
+
+ v8::TryCatch try_catch;
+ local_object->Set(v8::String::New("numberFormat"),
+ v8::String::New("valid"));
+ if (try_catch.HasCaught()) {
+ v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Internal error, couldn't set property.")));
+ return;
+ }
+ }
+
+ v8::Persistent<v8::Object> wrapper(isolate, local_object);
+  // Make the object handle weak so we can delete the formatter once GC
+  // kicks in.
+ wrapper.MakeWeak<void>(NULL, &DeleteNumberFormat);
+ args.GetReturnValue().Set(wrapper);
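+  // ClearAndLeak drops this Persistent without destroying the underlying
+  // handle; the weak callback above now owns the formatter's lifetime.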
+ wrapper.ClearAndLeak();
+}
+
+static icu::DecimalFormat* InitializeNumberFormat(
+ v8::Handle<v8::String> locale,
+ v8::Handle<v8::Object> options,
+ v8::Handle<v8::Object> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
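+  // A default-constructed Locale is ICU's default locale; it is used
+  // unless a non-empty BCP47 tag is supplied below.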
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::AsciiValue bcp47_locale(locale);
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::DecimalFormat* number_format =
+ CreateICUNumberFormat(icu_locale, options);
+ if (!number_format) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ number_format = CreateICUNumberFormat(no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system).
+ SetResolvedSettings(no_extension_locale, number_format, resolved);
+ } else {
+ SetResolvedSettings(icu_locale, number_format, resolved);
+ }
+
+ return number_format;
+}
+
+static icu::DecimalFormat* CreateICUNumberFormat(
+ const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
+  // Make a formatter from the options. The numbering system is added to
+  // the locale as a Unicode extension (if it was specified at all).
+ UErrorCode status = U_ZERO_ERROR;
+ icu::DecimalFormat* number_format = NULL;
+ icu::UnicodeString style;
+ icu::UnicodeString currency;
+ if (Utils::ExtractStringSetting(options, "style", &style)) {
+ if (style == UNICODE_STRING_SIMPLE("currency")) {
+ Utils::ExtractStringSetting(options, "currency", &currency);
+
+ icu::UnicodeString display;
+ Utils::ExtractStringSetting(options, "currencyDisplay", &display);
+#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
+ icu::NumberFormat::EStyles style;
+ if (display == UNICODE_STRING_SIMPLE("code")) {
+ style = icu::NumberFormat::kIsoCurrencyStyle;
+ } else if (display == UNICODE_STRING_SIMPLE("name")) {
+ style = icu::NumberFormat::kPluralCurrencyStyle;
+ } else {
+ style = icu::NumberFormat::kCurrencyStyle;
+ }
+#else // ICU version is 4.8 or above (we ignore versions below 4.0).
+ UNumberFormatStyle style;
+ if (display == UNICODE_STRING_SIMPLE("code")) {
+ style = UNUM_CURRENCY_ISO;
+ } else if (display == UNICODE_STRING_SIMPLE("name")) {
+ style = UNUM_CURRENCY_PLURAL;
+ } else {
+ style = UNUM_CURRENCY;
+ }
+#endif
+
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, style, status));
+ } else if (style == UNICODE_STRING_SIMPLE("percent")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, status));
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+      // Make sure 1.1% is not rounded up to 2%.
+ number_format->setMinimumFractionDigits(1);
+ } else {
+ // Make a decimal instance by default.
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+
+ // Set all options.
+ if (!currency.isEmpty()) {
+ number_format->setCurrency(currency.getBuffer(), status);
+ }
+
+ int32_t digits;
+ if (Utils::ExtractIntegerSetting(
+ options, "minimumIntegerDigits", &digits)) {
+ number_format->setMinimumIntegerDigits(digits);
+ }
+
+ if (Utils::ExtractIntegerSetting(
+ options, "minimumFractionDigits", &digits)) {
+ number_format->setMinimumFractionDigits(digits);
+ }
+
+ if (Utils::ExtractIntegerSetting(
+ options, "maximumFractionDigits", &digits)) {
+ number_format->setMaximumFractionDigits(digits);
+ }
+
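+  // Per ECMA-402, significant-digit options take precedence over the
+  // integer/fraction digit settings; ICU models this with
+  // setSignificantDigitsUsed() below.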
+ bool significant_digits_used = false;
+ if (Utils::ExtractIntegerSetting(
+ options, "minimumSignificantDigits", &digits)) {
+ number_format->setMinimumSignificantDigits(digits);
+ significant_digits_used = true;
+ }
+
+ if (Utils::ExtractIntegerSetting(
+ options, "maximumSignificantDigits", &digits)) {
+ number_format->setMaximumSignificantDigits(digits);
+ significant_digits_used = true;
+ }
+
+ number_format->setSignificantDigitsUsed(significant_digits_used);
+
+ bool grouping;
+ if (Utils::ExtractBooleanSetting(options, "useGrouping", &grouping)) {
+ number_format->setGroupingUsed(grouping);
+ }
+
+ // Set rounding mode.
+ number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
+
+ return number_format;
+}
+
+static void SetResolvedSettings(const icu::Locale& icu_locale,
+ icu::DecimalFormat* number_format,
+ v8::Handle<v8::Object> resolved) {
+ icu::UnicodeString pattern;
+ number_format->toPattern(pattern);
+ resolved->Set(v8::String::New("pattern"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ pattern.getBuffer()), pattern.length()));
+
+ // Set resolved currency code in options.currency if not empty.
+ icu::UnicodeString currency(number_format->getCurrency());
+ if (!currency.isEmpty()) {
+ resolved->Set(v8::String::New("currency"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ currency.getBuffer()), currency.length()));
+ }
+
+  // Ugly hack. ICU doesn't expose the numbering system in any way, so we
+  // have to assume that for a given locale the NumberingSystem constructor
+  // produces the same digits as NumberFormat would.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::NumberingSystem* numbering_system =
+ icu::NumberingSystem::createInstance(icu_locale, status);
+ if (U_SUCCESS(status)) {
+ const char* ns = numbering_system->getName();
+ resolved->Set(v8::String::New("numberingSystem"), v8::String::New(ns));
+ } else {
+ resolved->Set(v8::String::New("numberingSystem"), v8::Undefined());
+ }
+ delete numbering_system;
+
+ resolved->Set(v8::String::New("useGrouping"),
+ v8::Boolean::New(number_format->isGroupingUsed()));
+
+ resolved->Set(v8::String::New("minimumIntegerDigits"),
+ v8::Integer::New(number_format->getMinimumIntegerDigits()));
+
+ resolved->Set(v8::String::New("minimumFractionDigits"),
+ v8::Integer::New(number_format->getMinimumFractionDigits()));
+
+ resolved->Set(v8::String::New("maximumFractionDigits"),
+ v8::Integer::New(number_format->getMaximumFractionDigits()));
+
+ if (resolved->HasOwnProperty(v8::String::New("minimumSignificantDigits"))) {
+ resolved->Set(v8::String::New("minimumSignificantDigits"), v8::Integer::New(
+ number_format->getMinimumSignificantDigits()));
+ }
+
+ if (resolved->HasOwnProperty(v8::String::New("maximumSignificantDigits"))) {
+ resolved->Set(v8::String::New("maximumSignificantDigits"), v8::Integer::New(
+ number_format->getMaximumSignificantDigits()));
+ }
+
+  // Set the resolved locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ resolved->Set(v8::String::New("locale"), v8::String::New(result));
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ resolved->Set(v8::String::New("locale"), v8::String::New("und"));
+ }
+}
+
+} // namespace v8_i18n
diff --git a/deps/v8/src/extensions/i18n/number-format.h b/deps/v8/src/extensions/i18n/number-format.h
new file mode 100644
index 0000000000..d4dbc4d6f3
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/number-format.h
@@ -0,0 +1,69 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_I18N_NUMBER_FORMAT_H_
+#define V8_EXTENSIONS_I18N_NUMBER_FORMAT_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class DecimalFormat;
+}
+
+namespace v8_i18n {
+
+class NumberFormat {
+ public:
+ static void JSCreateNumberFormat(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Helper methods for various bindings.
+
+  // Unpacks a number format object from the corresponding JavaScript object.
+ static icu::DecimalFormat* UnpackNumberFormat(v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the NumberFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteNumberFormat(v8::Isolate* isolate,
+ v8::Persistent<v8::Object>* object,
+ void* param);
+
+ // Formats number and returns corresponding string.
+ static void JSInternalFormat(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ // Parses a string and returns a number.
+ static void JSInternalParse(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ NumberFormat();
+};
+
+} // namespace v8_i18n
+
+#endif // V8_EXTENSIONS_I18N_NUMBER_FORMAT_H_
diff --git a/deps/v8/src/extensions/i18n/number-format.js b/deps/v8/src/extensions/i18n/number-format.js
new file mode 100644
index 0000000000..1cd3db1355
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/number-format.js
@@ -0,0 +1,295 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them into a single Intl
+// namespace.
+
+/**
+ * Verifies that the input is a well-formed ISO 4217 currency code.
+ * Don't uppercase before testing; that could turn an invalid code into a
+ * valid one. For example, \u00DFP (Eszett+P) becomes SSP.
+ */
+function isWellFormedCurrencyCode(currency) {
+ return typeof currency == "string" &&
+ currency.length == 3 &&
+ currency.match(/[^A-Za-z]/) == null;
+}
+
+
+/**
+ * Returns the valid digit count for a property, or throws a RangeError
+ * if the value is out of range.
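+ * For example, a value of '2.9' for a property with range [0, 20] yields 2.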
+ */
+function getNumberOption(options, property, min, max, fallback) {
+ var value = options[property];
+ if (value !== undefined) {
+ value = Number(value);
+ if (isNaN(value) || value < min || value > max) {
+ throw new RangeError(property + ' value is out of range.');
+ }
+ return Math.floor(value);
+ }
+
+ return fallback;
+}
+
+
+/**
+ * Initializes the given object so it's a valid NumberFormat instance.
+ * Useful for subclassing.
+ */
+function initializeNumberFormat(numberFormat, locales, options) {
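+  // 'native function' is v8 extension syntax; the i18n extension resolves
+  // this name to NumberFormat::JSCreateNumberFormat.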
+ native function NativeJSCreateNumberFormat();
+
+ if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new TypeError('Trying to re-initialize NumberFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'numberformat');
+
+ var locale = resolveLocale('numberformat', locales, options);
+
+ var internalOptions = {};
+ defineWEProperty(internalOptions, 'style', getOption(
+ 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
+
+ var currency = getOption('currency', 'string');
+ if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
+ throw new RangeError('Invalid currency code: ' + currency);
+ }
+
+ if (internalOptions.style === 'currency' && currency === undefined) {
+ throw new TypeError('Currency code is required with currency style.');
+ }
+
+ var currencyDisplay = getOption(
+ 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
+ if (internalOptions.style === 'currency') {
+ defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
+ defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
+ }
+
+ // Digit ranges.
+ var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
+ defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
+
+ var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
+ defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+
+ var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
+ defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+
+ var mnsd = options['minimumSignificantDigits'];
+ var mxsd = options['maximumSignificantDigits'];
+ if (mnsd !== undefined || mxsd !== undefined) {
+ mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
+ defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
+
+ mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
+ defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
+ }
+
+ // Grouping.
+ defineWEProperty(internalOptions, 'useGrouping', getOption(
+ 'useGrouping', 'boolean', undefined, true));
+
+ // ICU prefers options to be passed using -u- extension key/values for
+ // number format, so we need to build that.
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = Object.defineProperties({}, {
+ currency: {writable: true},
+ currencyDisplay: {writable: true},
+ locale: {writable: true},
+ maximumFractionDigits: {writable: true},
+ minimumFractionDigits: {writable: true},
+ minimumIntegerDigits: {writable: true},
+ numberingSystem: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ style: {value: internalOptions.style, writable: true},
+ useGrouping: {writable: true}
+ });
+ if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
+ defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
+ }
+ if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
+ defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
+ }
+ var formatter = NativeJSCreateNumberFormat(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // We can't get information about number or currency style from ICU, so we
+  // assume the user's request was fulfilled.
+ if (internalOptions.style === 'currency') {
+ Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
+ writable: true});
+ }
+
+ Object.defineProperty(numberFormat, 'formatter', {value: formatter});
+ Object.defineProperty(numberFormat, 'resolved', {value: resolved});
+ Object.defineProperty(numberFormat, '__initializedIntlObject',
+ {value: 'numberformat'});
+
+ return numberFormat;
+}
+
+
+/**
+ * Constructs Intl.NumberFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'NumberFormat', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.NumberFormat(locales, options);
+ }
+
+ return initializeNumberFormat(toObject(this), locales, options);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+
+
+/**
+ * NumberFormat resolvedOptions method.
+ */
+%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'numberformat') {
+ throw new TypeError('resolvedOptions method called on a non-object' +
+        ' or on an object that is not Intl.NumberFormat.');
+ }
+
+ var format = this;
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ style: format.resolved.style,
+ useGrouping: format.resolved.useGrouping,
+ minimumIntegerDigits: format.resolved.minimumIntegerDigits,
+ minimumFractionDigits: format.resolved.minimumFractionDigits,
+ maximumFractionDigits: format.resolved.maximumFractionDigits,
+ };
+
+ if (result.style === 'currency') {
+ defineWECProperty(result, 'currency', format.resolved.currency);
+ defineWECProperty(result, 'currencyDisplay',
+ format.resolved.currencyDisplay);
+ }
+
+ if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
+ defineWECProperty(result, 'minimumSignificantDigits',
+ format.resolved.minimumSignificantDigits);
+ }
+
+ if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
+ defineWECProperty(result, 'maximumSignificantDigits',
+ format.resolved.maximumSignificantDigits);
+ }
+
+ return result;
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('numberformat', locales, arguments[1]);
+ },
+ ATTRIBUTES.DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(value)
+ * according to the effective locale and the formatting options of this
+ * NumberFormat.
+ */
+function formatNumber(formatter, value) {
+ native function NativeJSInternalNumberFormat();
+
+ // Spec treats -0 and +0 as 0.
+ var number = Number(value);
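+  // Note that -0 === 0 in JS, so this branch also matches +0, where the
+  // assignment is a harmless no-op.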
+ if (number === -0) {
+ number = 0;
+ }
+
+ return NativeJSInternalNumberFormat(formatter.formatter, number);
+}
+
+
+/**
+ * Returns a Number that represents the string value that was passed in.
+ */
+function parseNumber(formatter, value) {
+ native function NativeJSInternalNumberParse();
+
+ return NativeJSInternalNumberParse(formatter.formatter, String(value));
+}
+
+
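+// Note: 'format' is required by ECMA-402; 'v8Parse' is a V8-specific
+// extension, since ECMA-402 v1 defines no parse method.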
+addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
+addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
diff --git a/deps/v8/src/extensions/i18n/overrides.js b/deps/v8/src/extensions/i18n/overrides.js
new file mode 100644
index 0000000000..b2d60b3fc6
--- /dev/null
+++ b/deps/v8/src/extensions/i18n/overrides.js
@@ -0,0 +1,220 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation is broken into separate files for
+// each service. The build system combines them into a single Intl
+// namespace.
+
+
+// Save references to Intl objects and methods we use, for added security.
+var savedObjects = {
+ 'collator': Intl.Collator,
+ 'numberformat': Intl.NumberFormat,
+ 'dateformatall': Intl.DateTimeFormat,
+ 'dateformatdate': Intl.DateTimeFormat,
+ 'dateformattime': Intl.DateTimeFormat
+};
+
+
+// Default (created with undefined locales and options parameters) collator,
+// number and date format instances. They'll be created as needed.
+var defaultObjects = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformatall': undefined,
+ 'dateformatdate': undefined,
+ 'dateformattime': undefined,
+};
+
+
+/**
+ * Returns a cached or newly created instance of a given service.
+ * We cache only default instances (where no locales or options are provided).
+ */
+function cachedOrNewService(service, locales, options, defaults) {
+ var useOptions = (defaults === undefined) ? options : defaults;
+ if (locales === undefined && options === undefined) {
+ if (defaultObjects[service] === undefined) {
+ defaultObjects[service] = new savedObjects[service](locales, useOptions);
+ }
+ return defaultObjects[service];
+ }
+ return new savedObjects[service](locales, useOptions);
+}
+
+
+/**
+ * Compares this and that, and returns a value less than 0, equal to 0, or
+ * greater than 0.
+ * Overrides the built-in method.
+ */
+Object.defineProperty(String.prototype, 'localeCompare', {
+ value: function(that) {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (this === undefined || this === null) {
+ throw new TypeError('Method invoked on undefined or null value.');
+ }
+
+ var locales = arguments[1];
+ var options = arguments[2];
+ var collator = cachedOrNewService('collator', locales, options);
+ return compare(collator, this, that);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName(String.prototype.localeCompare, 'localeCompare');
+%FunctionRemovePrototype(String.prototype.localeCompare);
+%SetNativeFlag(String.prototype.localeCompare);
+
+
+/**
+ * Formats a Number object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used.
+ */
+Object.defineProperty(Number.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!(this instanceof Number) && typeof(this) !== 'number') {
+ throw new TypeError('Method invoked on an object that is not Number.');
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ var numberFormat = cachedOrNewService('numberformat', locales, options);
+ return formatNumber(numberFormat, this);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName(Number.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype(Number.prototype.toLocaleString);
+%SetNativeFlag(Number.prototype.toLocaleString);
+
+
+/**
+ * Returns the formatted date; throws a TypeError if the receiver is not a
+ * Date, and returns 'Invalid Date' for an invalid date value.
+ */
+function toLocaleDateTime(date, locales, options, required, defaults, service) {
+ if (!(date instanceof Date)) {
+ throw new TypeError('Method invoked on an object that is not Date.');
+ }
+
+ if (isNaN(date)) {
+ return 'Invalid Date';
+ }
+
+ var internalOptions = toDateTimeOptions(options, required, defaults);
+
+ var dateFormat =
+ cachedOrNewService(service, locales, options, internalOptions);
+
+ return formatDate(dateFormat, date);
+}
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - both date and time are
+ * present in the output.
+ */
+Object.defineProperty(Date.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ return toLocaleDateTime(
+ this, locales, options, 'any', 'all', 'dateformatall');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName(Date.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype(Date.prototype.toLocaleString);
+%SetNativeFlag(Date.prototype.toLocaleString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only date is present
+ * in the output.
+ */
+Object.defineProperty(Date.prototype, 'toLocaleDateString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ return toLocaleDateTime(
+ this, locales, options, 'date', 'date', 'dateformatdate');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName(Date.prototype.toLocaleDateString, 'toLocaleDateString');
+%FunctionRemovePrototype(Date.prototype.toLocaleDateString);
+%SetNativeFlag(Date.prototype.toLocaleDateString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only time is present
+ * in the output.
+ */
+Object.defineProperty(Date.prototype, 'toLocaleTimeString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ return toLocaleDateTime(
+ this, locales, options, 'time', 'time', 'dateformattime');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName(Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
+%FunctionRemovePrototype(Date.prototype.toLocaleTimeString);
+%SetNativeFlag(Date.prototype.toLocaleTimeString);
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index e5a3009e80..32bc07de8b 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -133,6 +133,12 @@ void StatisticsExtension::GetCounters(
"cell_space_available_bytes");
AddNumber(result, heap->cell_space()->CommittedMemory(),
"cell_space_commited_bytes");
+ AddNumber(result, heap->property_cell_space()->Size(),
+ "property_cell_space_live_bytes");
+ AddNumber(result, heap->property_cell_space()->Available(),
+ "property_cell_space_available_bytes");
+ AddNumber(result, heap->property_cell_space()->CommittedMemory(),
+ "property_cell_space_commited_bytes");
AddNumber(result, heap->lo_space()->Size(),
"lo_space_live_bytes");
AddNumber(result, heap->lo_space()->Available(),
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index c47b57d017..63b2379692 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -31,6 +31,7 @@
#include "debug.h"
#include "execution.h"
#include "factory.h"
+#include "isolate-inl.h"
#include "macro-assembler.h"
#include "objects.h"
#include "objects-visiting.h"
@@ -259,6 +260,32 @@ Handle<String> Factory::NewConsString(Handle<String> first,
}
+template<typename SinkChar, typename StringType>
+Handle<String> ConcatStringContent(Handle<StringType> result,
+ Handle<String> first,
+ Handle<String> second) {
+ DisallowHeapAllocation pointer_stays_valid;
+ SinkChar* sink = result->GetChars();
+ String::WriteToFlat(*first, sink, 0, first->length());
+ String::WriteToFlat(*second, sink + first->length(), 0, second->length());
+ return result;
+}
+
+
+Handle<String> Factory::NewFlatConcatString(Handle<String> first,
+ Handle<String> second) {
+ int total_length = first->length() + second->length();
+ if (first->IsOneByteRepresentationUnderneath() &&
+ second->IsOneByteRepresentationUnderneath()) {
+ return ConcatStringContent<uint8_t>(
+ NewRawOneByteString(total_length), first, second);
+ } else {
+ return ConcatStringContent<uc16>(
+ NewRawTwoByteString(total_length), first, second);
+ }
+}
+
+
Handle<String> Factory::NewSubString(Handle<String> str,
int begin,
int end) {
@@ -408,27 +435,17 @@ Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
Handle<Script> Factory::NewScript(Handle<String> source) {
// Generate id for this script.
- int id;
Heap* heap = isolate()->heap();
- if (heap->last_script_id()->IsUndefined()) {
- // Script ids start from one.
- id = 1;
- } else {
- // Increment id, wrap when positive smi is exhausted.
- id = Smi::cast(heap->last_script_id())->value();
- id++;
- if (!Smi::IsValid(id)) {
- id = 0;
- }
- }
- heap->SetLastScriptId(Smi::FromInt(id));
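+ // Increment the id, wrapping back to 1 when the Smi range is exhausted;
+ // script ids start at one.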
+ int id = heap->last_script_id()->value() + 1;
+ if (!Smi::IsValid(id) || id < 0) id = 1;
+ heap->set_last_script_id(Smi::FromInt(id));
// Create and initialize script object.
Handle<Foreign> wrapper = NewForeign(0, TENURED);
Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
script->set_source(*source);
script->set_name(heap->undefined_value());
- script->set_id(heap->last_script_id());
+ script->set_id(Smi::FromInt(id));
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
script->set_data(heap->undefined_value());
@@ -482,13 +499,21 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
}
-Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
- Handle<Object> value) {
+Handle<Cell> Factory::NewCell(Handle<Object> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateCell(*value),
+ Cell);
+}
+
+
+Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
- JSGlobalPropertyCell);
+ isolate()->heap()->AllocatePropertyCell(*value),
+ PropertyCell);
}
@@ -636,7 +661,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
result->is_compiled() &&
!function_info->is_toplevel() &&
function_info->allows_lazy_compilation() &&
- !function_info->optimization_disabled()) {
+ !function_info->optimization_disabled() &&
+ !isolate()->DebuggerHasBreakPoints()) {
result->MarkForLazyRecompilation();
}
return result;
@@ -1072,6 +1098,16 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
}
+Handle<JSDataView> Factory::NewJSDataView() {
+ JSFunction* data_view_fun =
+ isolate()->context()->native_context()->data_view_fun();
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(data_view_fun),
+ JSDataView);
+}
+
+
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
JSFunction* typed_array_fun;
Context* native_context = isolate()->context()->native_context();
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index d59d7423ae..0cb7157729 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -152,6 +152,10 @@ class Factory {
Handle<String> NewConsString(Handle<String> first,
Handle<String> second);
+ // Create a new sequential string containing the concatenation of the inputs.
+ Handle<String> NewFlatConcatString(Handle<String> first,
+ Handle<String> second);
+
// Create a new string object which holds a substring of a string.
Handle<String> NewSubString(Handle<String> str,
int begin,
@@ -235,8 +239,9 @@ class Factory {
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
- Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
- Handle<Object> value);
+ Handle<Cell> NewCell(Handle<Object> value);
+
+ Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
Handle<Map> NewMap(
InstanceType type,
@@ -327,6 +332,8 @@ class Factory {
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
+ Handle<JSDataView> NewJSDataView();
+
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index b70a532afc..a0f907db34 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -178,8 +178,7 @@ DEFINE_implication(harmony, harmony_symbols)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
-// TODO(wingo): Re-enable when GC bug that appeared in r15060 is gone.
-// DEFINE_implication(harmony, harmony_generators)
+DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
@@ -196,9 +195,6 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
-// TODO(hpayer): We will remove this flag as soon as we have pretenuring
-// support for specific allocation sites.
-DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@@ -218,6 +214,7 @@ DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
+DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
DEFINE_int(max_inlined_nodes, 196,
@@ -238,6 +235,7 @@ DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
+DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
DEFINE_bool(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
DEFINE_bool(trace_migration, false, "trace object migration")
@@ -268,8 +266,6 @@ DEFINE_bool(unreachable_code_elimination, false,
"eliminate unreachable code (hidden behind soft deopts)")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
-DEFINE_bool(optimize_constructed_arrays, true,
- "Use allocation site info on constructed arrays")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -291,10 +287,10 @@ DEFINE_bool(opt_safe_uint32_operations, true,
"allow uint32 values on optimize frames if they are used only in "
"safe operations")
-DEFINE_bool(parallel_recompilation, false,
+DEFINE_bool(parallel_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 3,
+DEFINE_int(parallel_recompilation_queue_length, 8,
"the length of the parallel compilation queue")
DEFINE_int(parallel_recompilation_delay, 0,
"artificial compilation delay in ms")
@@ -318,7 +314,7 @@ DEFINE_bool(weighted_back_edges, false,
// 0x1700 fits in the immediate field of an ARM instruction.
DEFINE_int(interrupt_budget, 0x1700,
"execution budget before interrupt is triggered")
-DEFINE_int(type_info_threshold, 15,
+DEFINE_int(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
@@ -365,6 +361,7 @@ DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
// bootstrapper.cc
+DEFINE_bool(enable_i18n, true, "enable i18n extension")
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
@@ -400,6 +397,7 @@ DEFINE_bool(trace_opt, false, "trace lazy optimization")
DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(always_opt, false, "always try to optimize functions")
+DEFINE_bool(always_osr, false, "always try to OSR functions")
DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization")
DEFINE_bool(trace_stub_failures, false,
@@ -472,7 +470,7 @@ DEFINE_bool(trace_external_memory, false,
"it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
-DEFINE_bool(weak_embedded_maps_in_optimized_code, false,
+DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
"make maps embedded in optimized code weak")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
@@ -481,7 +479,7 @@ DEFINE_bool(flush_code_incrementally, true,
DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
DEFINE_bool(age_code, true,
"track un-executed functions to age code and flush only "
- "old code")
+ "old code (required for code flushing)")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 282bf20ac4..855e20712c 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -34,7 +34,7 @@
#include "smart-pointers.h"
#include "string-stream.h"
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#endif
@@ -520,7 +520,7 @@ void FlagList::ResetAllFlags() {
// static
void FlagList::PrintHelp() {
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
CpuFeatures::PrintTarget();
CpuFeatures::Probe();
CpuFeatures::PrintFeatures();
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 1228ccf18d..8d10645d1d 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -116,7 +116,7 @@ inline Object** StackHandler::code_address() const {
}
-inline StackFrame::StackFrame(StackFrameIterator* iterator)
+inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
: iterator_(iterator), isolate_(iterator_->isolate()) {
}
@@ -136,22 +136,34 @@ inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
}
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
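+// Resolve pc_address through the return-address location resolver, if one
+// has been registered via StackFrame::SetReturnAddressLocationResolver.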
+inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
+ if (return_address_location_resolver_ == NULL) {
+ return pc_address;
+ } else {
+ return reinterpret_cast<Address*>(
+ return_address_location_resolver_(
+ reinterpret_cast<uintptr_t>(pc_address)));
+ }
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+inline EntryConstructFrame::EntryConstructFrame(
+ StackFrameIteratorBase* iterator)
: EntryFrame(iterator) {
}
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
@@ -201,7 +213,7 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
@@ -269,54 +281,51 @@ inline Object* JavaScriptFrame::function() const {
}
-inline StubFrame::StubFrame(StackFrameIterator* iterator)
+inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {
}
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
- StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+ StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
}
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
- StackFrameIterator* iterator) : StandardFrame(iterator) {
+ StackFrameIteratorBase* iterator) : StandardFrame(iterator) {
}
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
: InternalFrame(iterator) {
}
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate)
: iterator_(isolate) {
if (!done()) Advance();
}
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate, ThreadLocalTop* top)
: iterator_(isolate, top) {
if (!done()) Advance();
}
-template<typename Iterator>
-inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
+inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
// problematic that we can't use the safe-cast operator to cast to
// the JavaScript frame type, because we may encounter arguments
@@ -327,43 +336,10 @@ inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
}
-template<typename Iterator>
-JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate, StackFrame::Id id)
- : iterator_(isolate) {
- AdvanceToId(id);
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
- do {
- iterator_.Advance();
- } while (!iterator_.done() && !iterator_.frame()->is_java_script());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
- if (!frame()->has_adapted_arguments()) return;
- iterator_.Advance();
- ASSERT(iterator_.frame()->is_arguments_adaptor());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
- while (!done()) {
- Advance();
- if (frame()->id() == id) return;
- }
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
- iterator_.Reset();
- if (!done()) Advance();
+inline JavaScriptFrame* SafeStackFrameIterator::frame() const {
+ ASSERT(!done());
+ ASSERT(frame_->is_java_script());
+ return static_cast<JavaScriptFrame*>(frame_);
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 53f510849e..0408aa9074 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -43,19 +43,8 @@ namespace v8 {
namespace internal {
-static ReturnAddressLocationResolver return_address_location_resolver = NULL;
-
-
-// Resolves pc_address through the resolution address function if one is set.
-static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
- if (return_address_location_resolver == NULL) {
- return pc_address;
- } else {
- return reinterpret_cast<Address*>(
- return_address_location_resolver(
- reinterpret_cast<uintptr_t>(pc_address)));
- }
-}
+ReturnAddressLocationResolver
+ StackFrame::return_address_location_resolver_ = NULL;
// Iterator that supports traversing the stack handlers of a
@@ -88,39 +77,29 @@ class StackHandlerIterator BASE_EMBEDDED {
#define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIterator::StackFrameIterator(Isolate* isolate)
+StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
+ bool can_access_heap_objects)
: isolate_(isolate),
STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread_(t),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
+ can_access_heap_objects_(can_access_heap_objects) {
}
-StackFrameIterator::StackFrameIterator(Isolate* isolate,
- bool use_top, Address fp, Address sp)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(use_top ? isolate_->thread_local_top() : NULL),
- fp_(use_top ? NULL : fp), sp_(sp),
- advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
- &StackFrameIterator::AdvanceWithoutHandler) {
- if (use_top || fp != NULL) {
- Reset();
- }
+#undef INITIALIZE_SINGLETON
+
+
+StackFrameIterator::StackFrameIterator(Isolate* isolate)
+ : StackFrameIteratorBase(isolate, true) {
+ Reset(isolate->thread_local_top());
}
-#undef INITIALIZE_SINGLETON
+StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
+ : StackFrameIteratorBase(isolate, true) {
+ Reset(t);
+}
-void StackFrameIterator::AdvanceWithHandler() {
+
+void StackFrameIterator::Advance() {
ASSERT(!done());
// Compute the state of the calling frame before restoring
// callee-saved registers and unwinding handlers. This allows the
@@ -143,37 +122,17 @@ void StackFrameIterator::AdvanceWithHandler() {
}
-void StackFrameIterator::AdvanceWithoutHandler() {
- // A simpler version of Advance which doesn't care about handler.
- ASSERT(!done());
- StackFrame::State state;
- StackFrame::Type type = frame_->GetCallerState(&state);
- frame_ = SingletonFor(type, &state);
-}
-
-
-void StackFrameIterator::Reset() {
+void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame::State state;
- StackFrame::Type type;
- if (thread_ != NULL) {
- type = ExitFrame::GetStateForFramePointer(
- Isolate::c_entry_fp(thread_), &state);
- handler_ = StackHandler::FromAddress(
- Isolate::handler(thread_));
- } else {
- ASSERT(fp_ != NULL);
- state.fp = fp_;
- state.sp = sp_;
- state.pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
- type = StackFrame::ComputeType(isolate(), &state);
- }
+ StackFrame::Type type = ExitFrame::GetStateForFramePointer(
+ Isolate::c_entry_fp(top), &state);
+ handler_ = StackHandler::FromAddress(Isolate::handler(top));
if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
}
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
+StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame::State* state) {
if (type == StackFrame::NONE) return NULL;
StackFrame* result = SingletonFor(type);
@@ -183,7 +142,7 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
}
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
+StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: result = &field##_; break;
@@ -202,6 +161,33 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
// -------------------------------------------------------------------------
+JavaScriptFrameIterator::JavaScriptFrameIterator(
+ Isolate* isolate, StackFrame::Id id)
+ : iterator_(isolate) {
+ while (!done()) {
+ Advance();
+ if (frame()->id() == id) return;
+ }
+}
+
+
+void JavaScriptFrameIterator::Advance() {
+ do {
+ iterator_.Advance();
+ } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+}
+
+
+void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
+ if (!frame()->has_adapted_arguments()) return;
+ iterator_.Advance();
+ ASSERT(iterator_.frame()->is_arguments_adaptor());
+}
+
+
+// -------------------------------------------------------------------------
+
+
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
: JavaScriptFrameIterator(isolate) {
if (!done() && !IsValidFrame()) Advance();
@@ -228,85 +214,80 @@ bool StackTraceFrameIterator::IsValidFrame() {
// -------------------------------------------------------------------------
-bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
- if (!validator_.IsValid(fp)) return false;
- Address sp = ExitFrame::ComputeStackPointer(fp);
- if (!validator_.IsValid(sp)) return false;
- StackFrame::State state;
- ExitFrame::FillState(fp, sp, &state);
- if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
- return false;
- }
- return *state.pc_address != NULL;
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
- Isolate* isolate)
- : isolate_(isolate) {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() + 1);
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() - 1);
-}
-
-
SafeStackFrameIterator::SafeStackFrameIterator(
Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(isolate),
- stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
- is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
- is_working_iterator_(is_valid_top_ || is_valid_fp_),
- iteration_done_(!is_working_iterator_),
- iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
-}
+ Address fp, Address sp, Address js_entry_sp)
+ : StackFrameIteratorBase(isolate, false),
+ low_bound_(sp),
+ high_bound_(js_entry_sp),
+ top_frame_type_(StackFrame::NONE) {
+ StackFrame::State state;
+ StackFrame::Type type;
+ ThreadLocalTop* top = isolate->thread_local_top();
+ if (IsValidTop(top)) {
+ type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
+ top_frame_type_ = type;
+ } else if (IsValidStackAddress(fp)) {
+ ASSERT(fp != NULL);
+ state.fp = fp;
+ state.sp = sp;
+ state.pc_address = StackFrame::ResolveReturnAddressLocation(
+ reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+    // StackFrame::ComputeType will read both kContextOffset and
+    // kMarkerOffset. We only check that kMarkerOffset is within the stack
+    // bounds, and verify at compile time that the kContextOffset slot is
+    // pushed on the stack before kMarkerOffset.
+ STATIC_ASSERT(StandardFrameConstants::kMarkerOffset <
+ StandardFrameConstants::kContextOffset);
+ Address frame_marker = fp + StandardFrameConstants::kMarkerOffset;
+ if (IsValidStackAddress(frame_marker)) {
+ type = StackFrame::ComputeType(this, &state);
+ top_frame_type_ = type;
+ } else {
+      // Mark the frame as JAVA_SCRIPT if we cannot determine its type;
+      // the frame will be skipped anyway.
+ type = StackFrame::JAVA_SCRIPT;
+ // Top frame is incomplete so we cannot reliably determine its type.
+ top_frame_type_ = StackFrame::NONE;
+ }
+ } else {
+ return;
+ }
+ if (SingletonFor(type) == NULL) return;
+ frame_ = SingletonFor(type, &state);
-bool SafeStackFrameIterator::is_active(Isolate* isolate) {
- return isolate->safe_stack_iterator_counter() > 0;
+ if (!done()) Advance();
}
-bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound) {
- ThreadLocalTop* top = isolate->thread_local_top();
+bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
Address fp = Isolate::c_entry_fp(top);
- ExitFrameValidator validator(low_bound, high_bound);
- if (!validator.IsValidFP(fp)) return false;
+ if (!IsValidExitFrame(fp)) return false;
+ // There should be at least one JS_ENTRY stack handler.
return Isolate::handler(top) != NULL;
}
-void SafeStackFrameIterator::Advance() {
- ASSERT(is_working_iterator_);
+void SafeStackFrameIterator::AdvanceOneFrame() {
ASSERT(!done());
- StackFrame* last_frame = iterator_.frame();
+ StackFrame* last_frame = frame_;
Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
- // Before advancing to the next stack frame, perform pointer validity tests
- iteration_done_ = !IsValidFrame(last_frame) ||
- !CanIterateHandles(last_frame, iterator_.handler()) ||
- !IsValidCaller(last_frame);
- if (iteration_done_) return;
-
- iterator_.Advance();
- if (iterator_.done()) return;
- // Check that we have actually moved to the previous frame in the stack
- StackFrame* prev_frame = iterator_.frame();
- iteration_done_ = prev_frame->sp() < last_sp || prev_frame->fp() < last_fp;
-}
+ // Before advancing to the next stack frame, perform pointer validity tests.
+ if (!IsValidFrame(last_frame) || !IsValidCaller(last_frame)) {
+ frame_ = NULL;
+ return;
+ }
+ // Advance to the previous frame.
+ StackFrame::State state;
+ StackFrame::Type type = frame_->GetCallerState(&state);
+ frame_ = SingletonFor(type, &state);
+ if (frame_ == NULL) return;
-bool SafeStackFrameIterator::CanIterateHandles(StackFrame* frame,
- StackHandler* handler) {
- // If StackIterator iterates over StackHandles, verify that
- // StackHandlerIterator can be instantiated (see StackHandlerIterator
- // constructor.)
- return !is_valid_top_ || (frame->sp() <= handler->address());
+ // Check that we have actually moved to the previous frame in the stack.
+ if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
+ frame_ = NULL;
+ }
}
@@ -323,8 +304,7 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// sure that caller FP address is valid.
Address caller_fp = Memory::Address_at(
frame->fp() + EntryFrameConstants::kCallerFPOffset);
- ExitFrameValidator validator(stack_validator_);
- if (!validator.IsValidFP(caller_fp)) return false;
+ if (!IsValidExitFrame(caller_fp)) return false;
} else if (frame->is_arguments_adaptor()) {
// See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
// the number of arguments is stored on stack as Smi. We need to check
@@ -337,38 +317,35 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
}
frame->ComputeCallerState(&state);
return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
- iterator_.SingletonFor(frame->GetCallerState(&state)) != NULL;
+ SingletonFor(frame->GetCallerState(&state)) != NULL;
}
-void SafeStackFrameIterator::Reset() {
- if (is_working_iterator_) {
- iterator_.Reset();
- iteration_done_ = false;
+bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
+ if (!IsValidStackAddress(fp)) return false;
+ Address sp = ExitFrame::ComputeStackPointer(fp);
+ if (!IsValidStackAddress(sp)) return false;
+ StackFrame::State state;
+ ExitFrame::FillState(fp, sp, &state);
+ if (!IsValidStackAddress(reinterpret_cast<Address>(state.pc_address))) {
+ return false;
}
+ return *state.pc_address != NULL;
}
-// -------------------------------------------------------------------------
-
-
-SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
- if (!done() && !frame()->is_java_script()) Advance();
-}
-
-
-void SafeStackTraceFrameIterator::Advance() {
+void SafeStackFrameIterator::Advance() {
while (true) {
- SafeJavaScriptFrameIterator::Advance();
+ AdvanceOneFrame();
if (done()) return;
- if (frame()->is_java_script()) return;
+ if (frame_->is_java_script()) return;
}
}
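SafeStackFrameIterator only dereferences a frame once the pointer checks above have passed, which is what makes it usable where the stack may be in a half-built state, for example from a sampling profiler. A hypothetical driver loop, assuming fp, sp and js_entry_sp were captured from the sampled thread (illustrative sketch only, not code from the tree):

    // Illustrative only: collect return addresses from a sampled thread.
    static int CollectSample(Isolate* isolate, Address fp, Address sp,
                             Address js_entry_sp, Address* pcs, int limit) {
      SafeStackFrameIterator it(isolate, fp, sp, js_entry_sp);
      int depth = 0;
      while (!it.done() && depth < limit) {
        pcs[depth++] = it.frame()->pc();
        it.Advance();
      }
      return depth;
    }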
+// -------------------------------------------------------------------------
+
+
Code* StackFrame::GetSafepointData(Isolate* isolate,
Address inner_pointer,
SafepointEntry* safepoint_entry,
@@ -420,12 +397,13 @@ void StackFrame::IteratePc(ObjectVisitor* v,
void StackFrame::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver) {
- ASSERT(return_address_location_resolver == NULL);
- return_address_location_resolver = resolver;
+ ASSERT(return_address_location_resolver_ == NULL);
+ return_address_location_resolver_ = resolver;
}
-StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
+StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
+ State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
@@ -440,8 +418,9 @@ StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
// frames as normal JavaScript frames to avoid having to look
// into the heap to determine the state. This is safe as long
// as nobody tries to GC...
- if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
+ if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT;
+ Code::Kind kind = GetContainingCode(iterator->isolate(),
+ *(state->pc_address))->kind();
ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
}
@@ -449,15 +428,21 @@ StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
}
+#ifdef DEBUG
+bool StackFrame::can_access_heap_objects() const {
+ return iterator_->can_access_heap_objects_;
+}
+#endif
+
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
- return ComputeType(isolate(), state);
+ return ComputeType(iterator_, state);
}
Address StackFrame::UnpaddedFP() const {
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
if (!is_optimized()) return fp();
int32_t alignment_state = Memory::int32_at(
fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
@@ -545,6 +530,11 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
}
+Address ExitFrame::ComputeStackPointer(Address fp) {
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+}
+
+
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
@@ -607,7 +597,7 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
- ASSERT(!SafeStackFrameIterator::is_active(isolate()));
+ ASSERT(can_access_heap_objects());
// Compute the safepoint information.
unsigned stack_slots = 0;
@@ -734,12 +724,12 @@ int JavaScriptFrame::GetArgumentsLength() const {
Code* JavaScriptFrame::unchecked_code() const {
JSFunction* function = JSFunction::cast(this->function());
- return function->unchecked_code();
+ return function->code();
}
int JavaScriptFrame::GetNumberOfIncomingArguments() const {
- ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
+ ASSERT(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
JSFunction* function = JSFunction::cast(this->function());
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index f09c24a3fb..0a5b609442 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -47,7 +47,7 @@ int JSCallerSavedCode(int n);
// Forward declarations.
-class StackFrameIterator;
+class StackFrameIteratorBase;
class ThreadLocalTop;
class Isolate;
@@ -297,6 +297,10 @@ class StackFrame BASE_EMBEDDED {
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver);
+ // Resolves pc_address through the resolution address function if one is set.
+ static inline Address* ResolveReturnAddressLocation(Address* pc_address);
+
+
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
virtual void Print(StringStream* accumulator,
@@ -306,7 +310,7 @@ class StackFrame BASE_EMBEDDED {
Isolate* isolate() const { return isolate_; }
protected:
- inline explicit StackFrame(StackFrameIterator* iterator);
+ inline explicit StackFrame(StackFrameIteratorBase* iterator);
virtual ~StackFrame() { }
// Compute the stack pointer for the calling frame.
@@ -321,13 +325,19 @@ class StackFrame BASE_EMBEDDED {
inline StackHandler* top_handler() const;
// Compute the stack frame type for the given state.
- static Type ComputeType(Isolate* isolate, State* state);
+ static Type ComputeType(const StackFrameIteratorBase* iterator, State* state);
+
+#ifdef DEBUG
+ bool can_access_heap_objects() const;
+#endif
private:
- const StackFrameIterator* iterator_;
+ const StackFrameIteratorBase* iterator_;
Isolate* isolate_;
State state_;
+ static ReturnAddressLocationResolver return_address_location_resolver_;
+
// Fill in the state of the calling frame.
virtual void ComputeCallerState(State* state) const = 0;
@@ -337,6 +347,7 @@ class StackFrame BASE_EMBEDDED {
static const intptr_t kIsolateTag = 1;
friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
@@ -362,7 +373,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp);
protected:
- inline explicit EntryFrame(StackFrameIterator* iterator);
+ inline explicit EntryFrame(StackFrameIteratorBase* iterator);
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
@@ -373,7 +384,7 @@ class EntryFrame: public StackFrame {
virtual void ComputeCallerState(State* state) const;
virtual Type GetCallerState(State* state) const;
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -389,10 +400,10 @@ class EntryConstructFrame: public EntryFrame {
}
protected:
- inline explicit EntryConstructFrame(StackFrameIterator* iterator);
+ inline explicit EntryConstructFrame(StackFrameIteratorBase* iterator);
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -423,14 +434,14 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state);
protected:
- inline explicit ExitFrame(StackFrameIterator* iterator);
+ inline explicit ExitFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
private:
virtual void ComputeCallerState(State* state) const;
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -456,7 +467,7 @@ class StandardFrame: public StackFrame {
}
protected:
- inline explicit StandardFrame(StackFrameIterator* iterator);
+ inline explicit StandardFrame(StackFrameIteratorBase* iterator);
virtual void ComputeCallerState(State* state) const;
@@ -493,7 +504,7 @@ class StandardFrame: public StackFrame {
private:
friend class StackFrame;
- friend class StackFrameIterator;
+ friend class SafeStackFrameIterator;
};
@@ -599,7 +610,7 @@ class JavaScriptFrame: public StandardFrame {
bool print_line_number);
protected:
- inline explicit JavaScriptFrame(StackFrameIterator* iterator);
+ inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
@@ -612,7 +623,7 @@ class JavaScriptFrame: public StandardFrame {
private:
inline Object* function_slot_object() const;
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -627,13 +638,13 @@ class StubFrame : public StandardFrame {
virtual Code* unchecked_code() const;
protected:
- inline explicit StubFrame(StackFrameIterator* iterator);
+ inline explicit StubFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
virtual int GetNumberOfIncomingArguments() const;
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -656,12 +667,12 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
- inline explicit OptimizedFrame(StackFrameIterator* iterator);
+ inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
private:
JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -686,14 +697,14 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const;
protected:
- inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
+ inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
virtual int GetNumberOfIncomingArguments() const;
virtual Address GetCallerStackPointer() const;
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -713,12 +724,12 @@ class InternalFrame: public StandardFrame {
}
protected:
- inline explicit InternalFrame(StackFrameIterator* iterator);
+ inline explicit InternalFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -746,12 +757,12 @@ class StubFailureTrampolineFrame: public StandardFrame {
protected:
inline explicit StubFailureTrampolineFrame(
- StackFrameIterator* iterator);
+ StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -767,50 +778,30 @@ class ConstructFrame: public InternalFrame {
}
protected:
- inline explicit ConstructFrame(StackFrameIterator* iterator);
+ inline explicit ConstructFrame(StackFrameIteratorBase* iterator);
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
-class StackFrameIterator BASE_EMBEDDED {
+class StackFrameIteratorBase BASE_EMBEDDED {
public:
- // An iterator that iterates over the isolate's current thread's stack,
- explicit StackFrameIterator(Isolate* isolate);
-
- // An iterator that iterates over a given thread's stack.
- StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
-
- // An iterator that can start from a given FP address.
- // If use_top, then work as usual, if fp isn't NULL, use it,
- // otherwise, do nothing.
- StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
-
- StackFrame* frame() const {
- ASSERT(!done());
- return frame_;
- }
-
Isolate* isolate() const { return isolate_; }
bool done() const { return frame_ == NULL; }
- void Advance() { (this->*advance_)(); }
- // Go back to the first frame.
- void Reset();
+ protected:
+ // An iterator that iterates over a given thread's stack.
+ StackFrameIteratorBase(Isolate* isolate, bool can_access_heap_objects);
- private:
Isolate* isolate_;
#define DECLARE_SINGLETON(ignore, type) type type##_;
STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
#undef DECLARE_SINGLETON
StackFrame* frame_;
StackHandler* handler_;
- ThreadLocalTop* thread_;
- Address fp_;
- Address sp_;
- void (StackFrameIterator::*advance_)();
+ const bool can_access_heap_objects_;
StackHandler* handler() const {
ASSERT(!done());
@@ -822,44 +813,40 @@ class StackFrameIterator BASE_EMBEDDED {
// A helper function; it can return a NULL pointer.
StackFrame* SingletonFor(StackFrame::Type type);
- void AdvanceWithHandler();
- void AdvanceWithoutHandler();
-
+ private:
friend class StackFrame;
- friend class SafeStackFrameIterator;
- DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
+ DISALLOW_COPY_AND_ASSIGN(StackFrameIteratorBase);
};
-// Iterator that supports iterating through all JavaScript frames.
-template<typename Iterator>
-class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
+class StackFrameIterator: public StackFrameIteratorBase {
public:
- inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
+ // An iterator that iterates over the isolate's current thread's stack.
+ explicit StackFrameIterator(Isolate* isolate);
+ // An iterator that iterates over a given thread's stack.
+ StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
+ StackFrame* frame() const {
+ ASSERT(!done());
+ return frame_;
+ }
+ void Advance();
- // Skip frames until the frame with the given id is reached.
- explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
+ private:
+ // Go back to the first frame.
+ void Reset(ThreadLocalTop* top);
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
+ DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
+};
- JavaScriptFrameIteratorTemp(Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
- iterator_(fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
- JavaScriptFrameIteratorTemp(Isolate* isolate,
- Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
- iterator_(isolate, fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
+// Iterator that supports iterating through all JavaScript frames.
+class JavaScriptFrameIterator BASE_EMBEDDED {
+ public:
+ inline explicit JavaScriptFrameIterator(Isolate* isolate);
+ inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
+ // Skip frames until the frame with the given id is reached.
+ JavaScriptFrameIterator(Isolate* isolate, StackFrame::Id id);
inline JavaScriptFrame* frame() const;
@@ -871,26 +858,17 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
// arguments.
void AdvanceToArgumentsFrame();
- // Go back to the first frame.
- void Reset();
-
private:
- inline void AdvanceToId(StackFrame::Id id);
-
- Iterator iterator_;
+ StackFrameIterator iterator_;
};
-typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
-
-
// NOTE: The stack trace frame iterator is an iterator that only
// traverses proper JavaScript frames, that is, JavaScript frames that
// have proper JavaScript functions. This excludes the problematic
// functions in runtime.js.
class StackTraceFrameIterator: public JavaScriptFrameIterator {
public:
- StackTraceFrameIterator();
explicit StackTraceFrameIterator(Isolate* isolate);
void Advance();
@@ -899,95 +877,31 @@ class StackTraceFrameIterator: public JavaScriptFrameIterator {
};
-class SafeStackFrameIterator BASE_EMBEDDED {
+class SafeStackFrameIterator: public StackFrameIteratorBase {
public:
SafeStackFrameIterator(Isolate* isolate,
Address fp, Address sp,
- Address low_bound, Address high_bound);
-
- StackFrame* frame() const {
- ASSERT(is_working_iterator_);
- return iterator_.frame();
- }
-
- bool done() const { return iteration_done_ ? true : iterator_.done(); }
+ Address js_entry_sp);
+ inline JavaScriptFrame* frame() const;
void Advance();
- void Reset();
-
- static bool is_active(Isolate* isolate);
- static bool IsWithinBounds(
- Address low_bound, Address high_bound, Address addr) {
- return low_bound <= addr && addr <= high_bound;
- }
+ StackFrame::Type top_frame_type() const { return top_frame_type_; }
private:
- class StackAddressValidator {
- public:
- StackAddressValidator(Address low_bound, Address high_bound)
- : low_bound_(low_bound), high_bound_(high_bound) { }
- bool IsValid(Address addr) const {
- return IsWithinBounds(low_bound_, high_bound_, addr);
- }
- private:
- Address low_bound_;
- Address high_bound_;
- };
-
- class ExitFrameValidator {
- public:
- explicit ExitFrameValidator(const StackAddressValidator& validator)
- : validator_(validator) { }
- ExitFrameValidator(Address low_bound, Address high_bound)
- : validator_(low_bound, high_bound) { }
- bool IsValidFP(Address fp);
- private:
- StackAddressValidator validator_;
- };
+ void AdvanceOneFrame();
bool IsValidStackAddress(Address addr) const {
- return stack_validator_.IsValid(addr);
+ return low_bound_ <= addr && addr <= high_bound_;
}
- bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound);
-
- // This is a nasty hack to make sure the active count is incremented
- // before the constructor for the embedded iterator is invoked. This
- // is needed because the constructor will start looking at frames
- // right away and we need to make sure it doesn't start inspecting
- // heap objects.
- class ActiveCountMaintainer BASE_EMBEDDED {
- public:
- explicit ActiveCountMaintainer(Isolate* isolate);
- ~ActiveCountMaintainer();
- private:
- Isolate* isolate_;
- };
-
- ActiveCountMaintainer maintainer_;
- StackAddressValidator stack_validator_;
- const bool is_valid_top_;
- const bool is_valid_fp_;
- const bool is_working_iterator_;
- bool iteration_done_;
- StackFrameIterator iterator_;
-};
+ bool IsValidExitFrame(Address fp) const;
+ bool IsValidTop(ThreadLocalTop* top) const;
-
-typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
- SafeJavaScriptFrameIterator;
-
-
-class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
- public:
- explicit SafeStackTraceFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
- void Advance();
+ const Address low_bound_;
+ const Address high_bound_;
+ StackFrame::Type top_frame_type_;
};
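With the JavaScriptFrameIteratorTemp template gone, both concrete iterators derive from StackFrameIteratorBase and share the done()/Advance() protocol. A minimal sketch of driving them (hypothetical helper, assuming a valid internal Isolate*):

    // Illustrative only: count all frames, then just the JavaScript ones.
    void CountFrames(Isolate* isolate, int* total, int* js) {
      *total = 0;
      for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
        ++*total;
      }
      *js = 0;
      for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
        ++*js;  // this iterator skips non-JS frames, so *js <= *total
      }
    }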
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index bad634cf3f..c1350a14d9 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -473,7 +473,7 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<JSGlobalPropertyCell> cell) {
+ TypeFeedbackId id, Handle<Cell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
type_feedback_cells_.Add(entry, zone());
}
@@ -940,6 +940,11 @@ void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDebugBreakInOptimizedCode(CallRuntime* expr) {
+ context()->Plug(handle(Smi::FromInt(0), isolate()));
+}
+
+
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
@@ -1230,13 +1235,7 @@ void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
}
-void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
- Expression* expr = stmt->expression();
- VisitForAccumulatorValue(expr);
-
- // Exit all nested statements.
+void FullCodeGenerator::EmitUnwindBeforeReturn() {
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
@@ -1244,7 +1243,15 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
+}
+
+void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
+ Expression* expr = stmt->expression();
+ VisitForAccumulatorValue(expr);
+ EmitUnwindBeforeReturn();
EmitReturnSequence();
}
@@ -1543,7 +1550,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
void FullCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
- context()->Plug(expr->handle());
+ context()->Plug(expr->value());
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 68263a5dc6..7e6450655f 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -123,14 +123,15 @@ class FullCodeGenerator: public AstVisitor {
static const int kMaxBackEdgeWeight = 127;
+ // Platform-specific code size multiplier.
#if V8_TARGET_ARCH_IA32
- static const int kBackEdgeDistanceUnit = 100;
+ static const int kCodeSizeMultiplier = 100;
#elif V8_TARGET_ARCH_X64
- static const int kBackEdgeDistanceUnit = 162;
+ static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
- static const int kBackEdgeDistanceUnit = 142;
+ static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
- static const int kBackEdgeDistanceUnit = 142;
+ static const int kCodeSizeMultiplier = 142;
#else
#error Unsupported target architecture.
#endif
@@ -331,7 +332,7 @@ class FullCodeGenerator: public AstVisitor {
// Helper function to split control flow and avoid a branch to the
// fall-through label if it is set up.
-#ifdef V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS
void Split(Condition cc,
Register lhs,
const Operand& rhs,
@@ -410,10 +411,10 @@ class FullCodeGenerator: public AstVisitor {
// this has to be a separate pass _before_ populating or executing any module.
void AllocateModules(ZoneList<Declaration*>* declarations);
- // Generator code to return a fresh iterator result object. The "value"
- // property is set to a value popped from the stack, and "done" is set
- // according to the argument.
- void EmitReturnIteratorResult(bool done);
+ // Generate code to create an iterator result object. The "value" property is
+ // set to a value popped from the stack, and "done" is set according to the
+ // argument. The result object is left in the result register.
+ void EmitCreateIteratorResult(bool done);
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
@@ -437,8 +438,7 @@ class FullCodeGenerator: public AstVisitor {
// Cache cell support. This associates AST ids with global property cells
// that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id,
- Handle<JSGlobalPropertyCell> cell);
+ void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -472,6 +472,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitProfilingCounterDecrement(int delta);
void EmitProfilingCounterReset();
+ // Emit code to pop values from the stack associated with nested statements
+ // like try/catch and try/finally, running the finally blocks and
+ // unwinding the handlers as needed.
+ void EmitUnwindBeforeReturn();
+
// Platform-specific return sequence
void EmitReturnSequence();
@@ -648,7 +653,7 @@ class FullCodeGenerator: public AstVisitor {
struct TypeFeedbackCellEntry {
TypeFeedbackId ast_id;
- Handle<JSGlobalPropertyCell> cell;
+ Handle<Cell> cell;
};
@@ -844,7 +849,7 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
- Handle<JSGlobalPropertyCell> profiling_counter_;
+ Handle<Cell> profiling_counter_;
bool generate_debug_code_;
Zone* zone_;
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 5717a96079..825d1e7c1c 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -217,7 +217,7 @@ class DebugSectionBase : public ZoneObject {
struct MachOSectionHeader {
char sectname[16];
char segname[16];
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
uint32_t addr;
uint32_t size;
#else
@@ -500,10 +500,10 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
#if defined(__MACH_O)
class MachO BASE_EMBEDDED {
public:
- MachO() : sections_(6) { }
+ explicit MachO(Zone* zone) : zone_(zone), sections_(6, zone) { }
uint32_t AddSection(MachOSection* section) {
- sections_.Add(section);
+ sections_.Add(section, zone_);
return sections_.length() - 1;
}
@@ -525,7 +525,7 @@ class MachO BASE_EMBEDDED {
uint32_t ncmds;
uint32_t sizeofcmds;
uint32_t flags;
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
uint32_t reserved;
#endif
};
@@ -534,7 +534,7 @@ class MachO BASE_EMBEDDED {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
@@ -560,11 +560,11 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
header->magic = 0xFEEDFACFu;
header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
@@ -585,7 +585,7 @@ class MachO BASE_EMBEDDED {
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
@@ -620,7 +620,7 @@ class MachO BASE_EMBEDDED {
cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
}
-
+ Zone* zone_;
ZoneList<MachOSection*> sections_;
};
#endif // defined(__MACH_O)
@@ -629,7 +629,7 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
- explicit ELF(Zone* zone) : sections_(6, zone) {
+ explicit ELF(Zone* zone) : zone_(zone), sections_(6, zone) {
sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
}
@@ -644,8 +644,8 @@ class ELF BASE_EMBEDDED {
return sections_[index];
}
- uint32_t AddSection(ELFSection* section, Zone* zone) {
- sections_.Add(section, zone);
+ uint32_t AddSection(ELFSection* section) {
+ sections_.Add(section, zone_);
section->set_index(sections_.length() - 1);
return sections_.length() - 1;
}
@@ -672,10 +672,10 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#else
@@ -683,14 +683,14 @@ class ELF BASE_EMBEDDED {
#endif
OS::MemCopy(header->ident, ident, 16);
header->type = 1;
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
header->machine = 3;
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
// System V ABI, AMD64 Supplement
// http://www.x86-64.org/documentation/abi.pdf
header->machine = 62;
-#elif defined(V8_TARGET_ARCH_ARM)
+#elif V8_TARGET_ARCH_ARM
// Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
// infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
header->machine = 40;
@@ -743,6 +743,7 @@ class ELF BASE_EMBEDDED {
}
}
+ Zone* zone_;
ZoneList<ELFSection*> sections_;
};
@@ -784,7 +785,7 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -807,7 +808,7 @@ class ELFSymbol BASE_EMBEDDED {
uint8_t other;
uint16_t section;
};
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -921,7 +922,7 @@ class ELFSymbolTable : public ELFSection {
class CodeDescription BASE_EMBEDDED {
public:
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
enum StackState {
POST_RBP_PUSH,
POST_RBP_SET,
@@ -984,7 +985,7 @@ class CodeDescription BASE_EMBEDDED {
lineinfo_ != NULL;
}
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
uintptr_t GetStackStateStartAddress(StackState state) const {
ASSERT(state < STACK_STATE_MAX);
return stack_state_start_addresses_[state];
@@ -1012,7 +1013,7 @@ class CodeDescription BASE_EMBEDDED {
GDBJITLineInfo* lineinfo_;
GDBJITInterface::CodeTag tag_;
CompilationInfo* info_;
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
};
@@ -1026,8 +1027,8 @@ static void CreateSymbolsTable(CodeDescription* desc,
ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
// Symbol table should be followed by the linked string table.
- elf->AddSection(symtab, zone);
- elf->AddSection(strtab, zone);
+ elf->AddSection(symtab);
+ elf->AddSection(strtab);
symtab->Add(ELFSymbol("V8 Code",
0,
@@ -1106,13 +1107,13 @@ class DebugInfoSection : public DebugSection {
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
-#elif defined(V8_TARGET_ARCH_ARM)
+#elif V8_TARGET_ARCH_ARM
UNIMPLEMENTED();
-#elif defined(V8_TARGET_ARCH_MIPS)
+#elif V8_TARGET_ARCH_MIPS
UNIMPLEMENTED();
#else
#error Unsupported target architecture.
@@ -1563,7 +1564,7 @@ class DebugLineSection : public DebugSection {
};
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
class UnwindInfoSection : public DebugSection {
public:
@@ -1793,12 +1794,12 @@ static void CreateDWARFSections(CodeDescription* desc,
Zone* zone,
DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
- obj->AddSection(new(zone) DebugInfoSection(desc), zone);
- obj->AddSection(new(zone) DebugAbbrevSection(desc), zone);
- obj->AddSection(new(zone) DebugLineSection(desc), zone);
+ obj->AddSection(new(zone) DebugInfoSection(desc));
+ obj->AddSection(new(zone) DebugAbbrevSection(desc));
+ obj->AddSection(new(zone) DebugLineSection(desc));
}
-#ifdef V8_TARGET_ARCH_X64
- obj->AddSection(new(zone) UnwindInfoSection(desc), zone);
+#if V8_TARGET_ARCH_X64
+ obj->AddSection(new(zone) UnwindInfoSection(desc));
#endif
}
@@ -1916,37 +1917,37 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
}
-static JITCodeEntry* CreateELFObject(CodeDescription* desc, Zone* zone) {
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
#ifdef __MACH_O
- MachO mach_o;
+ Zone zone(isolate);
+ MachO mach_o(&zone);
Writer w(&mach_o);
- mach_o.AddSection(new MachOTextSection(kCodeAlignment,
- desc->CodeStart(),
- desc->CodeSize()));
+ mach_o.AddSection(new(&zone) MachOTextSection(kCodeAlignment,
+ desc->CodeStart(),
+ desc->CodeSize()));
- CreateDWARFSections(desc, &mach_o);
+ CreateDWARFSections(desc, &zone, &mach_o);
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
- ELF elf(zone);
+ Zone zone(isolate);
+ ELF elf(&zone);
Writer w(&elf);
int text_section_index = elf.AddSection(
- new(zone) FullHeaderELFSection(
+ new(&zone) FullHeaderELFSection(
".text",
ELFSection::TYPE_NOBITS,
kCodeAlignment,
desc->CodeStart(),
0,
desc->CodeSize(),
- ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
- zone);
+ ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
- CreateSymbolsTable(desc, zone, &elf, text_section_index);
+ CreateSymbolsTable(desc, &zone, &elf, text_section_index);
- CreateDWARFSections(desc, zone, &elf);
+ CreateDWARFSections(desc, &zone, &elf);
elf.Write(&w);
#endif
@@ -2015,7 +2016,7 @@ void GDBJITInterface::AddCode(Handle<Name> name,
}
static void AddUnwindInfo(CodeDescription* desc) {
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
// To avoid propagating unwinding information through the
// compilation pipeline we use an approximation.
@@ -2083,8 +2084,8 @@ void GDBJITInterface::AddCode(const char* name,
}
AddUnwindInfo(&code_desc);
- Zone* zone = code->GetIsolate()->runtime_zone();
- JITCodeEntry* entry = CreateELFObject(&code_desc, zone);
+ Isolate* isolate = code->GetIsolate();
+ JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
ASSERT(!IsLineInfoTagged(entry));
delete lineinfo;
@@ -2165,6 +2166,24 @@ void GDBJITInterface::RemoveCode(Code* code) {
}
+void GDBJITInterface::RemoveCodeRange(Address start, Address end) {
+ HashMap* entries = GetEntries();
+ Zone zone(Isolate::Current());
+ ZoneList<Code*> dead_codes(1, &zone);
+
+ for (HashMap::Entry* e = entries->Start(); e != NULL; e = entries->Next(e)) {
+ Code* code = reinterpret_cast<Code*>(e->key);
+ if (code->address() >= start && code->address() < end) {
+ dead_codes.Add(code, &zone);
+ }
+ }
+
+ for (int i = 0; i < dead_codes.length(); i++) {
+ RemoveCode(dead_codes.at(i));
+ }
+}
+
+
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
ScopedLock lock(mutex.Pointer());
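RemoveCodeRange above collects matching entries into dead_codes before calling RemoveCode, because removing entries while walking the HashMap would invalidate the traversal. The same two-phase pattern in a self-contained form (a sketch using the standard library rather than V8's HashMap):

    #include <map>
    #include <vector>

    // Two-phase removal: collect keys while iterating, erase afterwards.
    template <typename K, typename V, typename Pred>
    void RemoveMatching(std::map<K, V>* entries, Pred pred) {
      std::vector<K> dead;
      for (typename std::map<K, V>::const_iterator it = entries->begin();
           it != entries->end(); ++it) {
        if (pred(it->first)) dead.push_back(it->first);
      }
      for (size_t i = 0; i < dead.size(); ++i) entries->erase(dead[i]);
    }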
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index cc052c1c06..a34d3d3012 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -131,6 +131,8 @@ class GDBJITInterface: public AllStatic {
static void RemoveCode(Code* code);
+ static void RemoveCodeRange(Address start, Address end);
+
static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
};
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
index cc31a44588..3c8ea6f319 100644
--- a/deps/v8/src/generator.js
+++ b/deps/v8/src/generator.js
@@ -55,6 +55,23 @@ function GeneratorObjectThrow(exn) {
return %_GeneratorThrow(this, exn);
}
+function GeneratorFunctionPrototypeConstructor(x) {
+ if (%_IsConstructCall()) {
+ throw MakeTypeError('not_constructor', ['GeneratorFunctionPrototype']);
+ }
+}
+
+function GeneratorFunctionConstructor(arg1) { // length == 1
+ var source = NewFunctionString(arguments, 'function*');
+ var global_receiver = %GlobalReceiver(global);
+ // Compile the string in the constructor rather than in a helper so that
+ // errors appear to come from here.
+ var f = %_CallFunction(global_receiver, %CompileString(source, true));
+ %FunctionMarkNameShouldPrintAsAnonymous(f);
+ return f;
+}
+
+
function SetUpGenerators() {
%CheckIsBootstrapping();
var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
@@ -65,9 +82,11 @@ function SetUpGenerators() {
%SetProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
+ %SetCode(GeneratorFunctionPrototype, GeneratorFunctionPrototypeConstructor);
%SetProperty(GeneratorFunctionPrototype, "constructor",
GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetPrototype(GeneratorFunction, $Function);
+ %SetCode(GeneratorFunction, GeneratorFunctionConstructor);
}
SetUpGenerators();
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index ac26e009c4..cd75133a24 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -152,7 +152,7 @@ class GlobalHandles {
int NumberOfGlobalObjectWeakHandles();
// Returns the current number of handles to global objects.
- int NumberOfGlobalHandles() {
+ int global_handles_count() const {
return number_of_global_handles_;
}
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 59931bf5dd..baacf5226e 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -105,8 +105,8 @@ namespace internal {
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
- !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -121,18 +121,16 @@ namespace internal {
#endif
// Check for supported combinations of host and target architectures.
-#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
#error Target architecture ia32 is only supported on ia32 host
#endif
-#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
+#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64
#error Target architecture x64 is only supported on x64 host
#endif
-#if (defined(V8_TARGET_ARCH_ARM) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
+#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
+#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -140,14 +138,27 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
-#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM))
+#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS))
+#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
#endif
+// Determine architecture endianness (we only support little-endian).
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_X64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_MIPS
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#error Unknown target architecture endianness
+#endif
+
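Every architecture supported above is little-endian, so the macro is defined unconditionally per target. What the macro asserts can also be checked at runtime; a minimal self-contained sketch:

    #include <stdint.h>

    // True on little-endian hosts: the low-order byte of 1 is stored first.
    static bool IsLittleEndianHost() {
      const uint16_t probe = 1;
      return *reinterpret_cast<const uint8_t*>(&probe) == 1;
    }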
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -399,6 +410,18 @@ enum LanguageMode {
};
+// A simple Maybe type that can be passed by value.
+template<class T>
+struct Maybe {
+ Maybe() : has_value(false) {}
+ explicit Maybe(T t) : has_value(true), value(t) {}
+ Maybe(bool has, T t) : has_value(has), value(t) {}
+
+ bool has_value;
+ T value;
+};
+
+
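Because Maybe<T> keeps the flag and the value inline, it can be returned by value with no allocation. A hypothetical use (the function and names are illustrative):

    // Illustrative only: a lookup that may find nothing.
    Maybe<int> FindIndex(const int* xs, int n, int key) {
      for (int i = 0; i < n; i++) {
        if (xs[i] == key) return Maybe<int>(i);  // has_value == true
      }
      return Maybe<int>();                       // has_value == false
    }
    // Caller:
    //   Maybe<int> m = FindIndex(xs, n, 42);
    //   if (m.has_value) Use(m.value);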
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
//
// This flag is used in the backend to represent the language mode. So far
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 81828d98cc..7d4b25f10f 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -294,13 +294,6 @@ Handle<Object> GetProperty(Isolate* isolate,
}
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
- const bool skip_hidden_prototypes = false;
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetPrototype(*value, skip_hidden_prototypes), Object);
-}
-
-
Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index) {
CALL_HEAP_FUNCTION(
@@ -347,7 +340,7 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
static void ClearWrapperCache(v8::Isolate* v8_isolate,
Persistent<v8::Value>* handle,
void*) {
- Handle<Object> cache = Utils::OpenHandle(**handle);
+ Handle<Object> cache = Utils::OpenPersistent(handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
ASSERT(foreign->foreign_address() ==
@@ -557,11 +550,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
v8::NamedPropertyEnumerator enum_fun =
v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(enum_fun);
- }
+ result = args.Call(enum_fun);
}
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
@@ -583,14 +572,10 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
v8::IndexedPropertyEnumerator enum_fun =
v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(enum_fun);
+ result = args.Call(enum_fun);
#if ENABLE_EXTRA_CHECKS
- CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+ CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
- }
}
return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
result);
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 0cd4f5bca9..5976b758e1 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "apiutils.h"
+#include "objects.h"
namespace v8 {
namespace internal {
@@ -244,8 +245,6 @@ Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> obj,
Handle<Object> key);
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-
Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index);
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index f0861b2e7a..92ae8e54b5 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -245,6 +245,8 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
+ } else if (PROPERTY_CELL_SPACE == space) {
+ result = property_cell_space_->AllocateRaw(size_in_bytes);
} else {
ASSERT(MAP_SPACE == space);
result = map_space_->AllocateRaw(size_in_bytes);
@@ -305,7 +307,19 @@ MaybeObject* Heap::AllocateRawCell() {
isolate_->counters()->objs_since_last_full()->Increment();
isolate_->counters()->objs_since_last_young()->Increment();
#endif
- MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
+ MaybeObject* result = cell_space_->AllocateRaw(Cell::kSize);
+ if (result->IsFailure()) old_gen_exhausted_ = true;
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateRawPropertyCell() {
+#ifdef DEBUG
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
+#endif
+ MaybeObject* result =
+ property_cell_space_->AllocateRaw(PropertyCell::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
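As with the other raw allocators, failure is reported through the returned MaybeObject* rather than by throwing; callers unwrap the result and propagate failures. A sketch of the usual caller idiom (hypothetical function, member access ignored):

    // Illustrative only: unwrap-or-propagate for a MaybeObject result.
    MaybeObject* AllocateCellFor(Heap* heap) {
      Object* cell;
      MaybeObject* maybe = heap->AllocateRawPropertyCell();
      if (!maybe->ToObject(&cell)) return maybe;  // propagate the failure
      return cell;  // an Object* converts to a successful MaybeObject*
    }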
@@ -407,7 +421,8 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
ASSERT(type != MAP_TYPE);
ASSERT(type != CODE_TYPE);
ASSERT(type != ODDBALL_TYPE);
- ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
+ ASSERT(type != CELL_TYPE);
+ ASSERT(type != PROPERTY_CELL_TYPE);
if (type <= LAST_NAME_TYPE) {
if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
@@ -535,7 +550,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
} else {
- // Give up and reset the counters in case of an overflow.
+ // Give up and reset the counters in case of an underflow.
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
@@ -543,8 +558,11 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
if (FLAG_trace_external_memory) {
PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
- " amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
- change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
+ "amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
+ "isolate=0x%08" V8PRIxPTR ".\n",
+ change_in_bytes / KB,
+ amount_of_external_allocated_memory_ / KB,
+ PromotedExternalMemorySize() / KB,
reinterpret_cast<intptr_t>(isolate()));
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
@@ -552,11 +570,6 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
}
-void Heap::SetLastScriptId(Object* last_script_id) {
- roots_[kLastScriptIdRootIndex] = last_script_id;
-}
-
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 4f6fdb1122..e517df4a1a 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -124,11 +124,6 @@ HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
}
-HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- return snapshots_->GetSnapshot(uid);
-}
-
-
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 1ed73b9ca6..5ae60fa923 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -28,6 +28,7 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
+#include "heap-snapshot-generator-inl.h"
#include "isolate.h"
namespace v8 {
@@ -65,7 +66,6 @@ class HeapProfiler {
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
- HeapSnapshot* FindSnapshot(unsigned uid);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index b8237a6b13..f959aee00e 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -31,6 +31,7 @@
#include "heap-profiler.h"
#include "debug.h"
+#include "types.h"
namespace v8 {
namespace internal {
@@ -188,15 +189,11 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
static const int kExpectedHeapEntrySize = 24;
- static const int kExpectedHeapSnapshotsCollectionSize = 100;
- static const int kExpectedHeapSnapshotSize = 132;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
static const int kExpectedHeapEntrySize = 32;
- static const int kExpectedHeapSnapshotsCollectionSize = 152;
- static const int kExpectedHeapSnapshotSize = 160;
};
} // namespace
@@ -237,7 +234,7 @@ void HeapSnapshot::RememberLastJSObjectId() {
HeapEntry* HeapSnapshot::AddRootEntry() {
ASSERT(root_index_ == HeapEntry::kNoEntry);
ASSERT(entries_.is_empty()); // Root entry must be the first one.
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
+ HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"",
HeapObjectsMap::kInternalRootObjectId,
0);
@@ -249,7 +246,7 @@ HeapEntry* HeapSnapshot::AddRootEntry() {
HeapEntry* HeapSnapshot::AddGcRootsEntry() {
ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
+ HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"(GC roots)",
HeapObjectsMap::kGcRootsObjectId,
0);
@@ -262,7 +259,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
HeapEntry* entry = AddEntry(
- HeapEntry::kObject,
+ HeapEntry::kSynthetic,
VisitorSynchronization::kTagNames[tag],
HeapObjectsMap::GetNthGcSubrootId(tag),
0);
@@ -352,8 +349,6 @@ static size_t GetMemoryUsedByList(const List<T, P>& list) {
size_t HeapSnapshot::RawSnapshotSize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
- sizeof(HeapSnapshot)); // NOLINT
return
sizeof(*this) +
GetMemoryUsedByList(entries_) +
@@ -577,7 +572,6 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
: is_tracking_objects_(false),
- snapshots_uids_(HeapSnapshotsMatch),
token_enumerator_(new TokenEnumerator()),
ids_(heap) {
}
@@ -606,29 +600,12 @@ void HeapSnapshotsCollection::SnapshotGenerationFinished(
ids_.SnapshotGenerationFinished();
if (snapshot != NULL) {
snapshots_.Add(snapshot);
- HashMap::Entry* entry =
- snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
- static_cast<uint32_t>(snapshot->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = snapshot;
}
}
-HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
- HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
-}
-
-
void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
snapshots_.RemoveElement(snapshot);
- unsigned uid = snapshot->uid();
- snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
}
@@ -655,13 +632,9 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
- kExpectedHeapSnapshotsCollectionSize ==
- sizeof(HeapSnapshotsCollection)); // NOLINT
size_t size = sizeof(*this);
size += names_.GetUsedMemorySize();
size += ids_.GetUsedMemorySize();
- size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
size += GetMemoryUsedByList(snapshots_);
for (int i = 0; i < snapshots_.length(); ++i) {
size += snapshots_[i]->RawSnapshotSize();
@@ -888,7 +861,8 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
#undef MAKE_STRING_MAP_CASE
default: return "system / Map";
}
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
+ case CELL_TYPE: return "system / Cell";
+ case PROPERTY_CELL_TYPE: return "system / PropertyCell";
case FOREIGN_TYPE: return "system / Foreign";
case ODDBALL_TYPE: return "system / Oddball";
#define MAKE_STRUCT_CASE(NAME, Name, name) \
@@ -959,7 +933,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
- ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
+ ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -972,13 +946,18 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
} else if (obj->IsScript()) {
ExtractScriptReferences(entry, Script::cast(obj));
+ } else if (obj->IsAccessorPair()) {
+ ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
} else if (obj->IsCodeCache()) {
ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
- } else if (obj->IsJSGlobalPropertyCell()) {
- ExtractJSGlobalPropertyCellReferences(
- entry, JSGlobalPropertyCell::cast(obj));
+ } else if (obj->IsCell()) {
+ ExtractCellReferences(entry, Cell::cast(obj));
+ extract_indexed_refs = false;
+ } else if (obj->IsPropertyCell()) {
+ ExtractPropertyCellReferences(
+ entry, PropertyCell::cast(obj));
extract_indexed_refs = false;
}
if (extract_indexed_refs) {
@@ -989,19 +968,11 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
}
-void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
- // We need to reference JS global objects from snapshot's root.
- // We use JSGlobalProxy because this is what embedder (e.g. browser)
- // uses for the global object.
- Object* object = proxy->map()->prototype();
- bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- is_debug_object = object->IsGlobalObject() &&
- Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
-#endif
- if (!is_debug_object) {
- SetUserGlobalReference(object);
- }
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(
+ int entry, JSGlobalProxy* proxy) {
+ SetInternalReference(proxy, entry,
+ "native_context", proxy->native_context(),
+ JSGlobalProxy::kNativeContextOffset);
}
@@ -1043,9 +1014,9 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetInternalReference(js_fun, entry,
"shared", shared_info,
JSFunction::kSharedFunctionInfoOffset);
- TagObject(js_fun->unchecked_context(), "(context)");
+ TagObject(js_fun->context(), "(context)");
SetInternalReference(js_fun, entry,
- "context", js_fun->unchecked_context(),
+ "context", js_fun->context(),
JSFunction::kContextOffset);
for (int i = JSFunction::kNonWeakFieldsEndOffset;
i < JSFunction::kSize;
@@ -1237,6 +1208,15 @@ void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
}
+void V8HeapExplorer::ExtractAccessorPairReferences(
+ int entry, AccessorPair* accessors) {
+ SetInternalReference(accessors, entry, "getter", accessors->getter(),
+ AccessorPair::kGetterOffset);
+ SetInternalReference(accessors, entry, "setter", accessors->setter(),
+ AccessorPair::kSetterOffset);
+}
+
+
void V8HeapExplorer::ExtractCodeCacheReferences(
int entry, CodeCache* code_cache) {
TagObject(code_cache->default_cache(), "(default code cache)");
@@ -1273,9 +1253,15 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
}
-void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
- int entry, JSGlobalPropertyCell* cell) {
+void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
+ SetInternalReference(cell, entry, "value", cell->value());
+}
+
+
+void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
+ PropertyCell* cell) {
SetInternalReference(cell, entry, "value", cell->value());
+ SetInternalReference(cell, entry, "type", cell->type());
}
@@ -1342,21 +1328,11 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
js_obj, entry,
descs->GetKey(i), descs->GetConstantFunction(i));
break;
- case CALLBACKS: {
- Object* callback_obj = descs->GetValue(i);
- if (callback_obj->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(callback_obj);
- if (Object* getter = accessors->getter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- getter, "get-%s");
- }
- if (Object* setter = accessors->setter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- setter, "set-%s");
- }
- }
+ case CALLBACKS:
+ ExtractAccessorPairProperty(
+ js_obj, entry,
+ descs->GetKey(i), descs->GetValue(i));
break;
- }
case NORMAL: // only in slow mode
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
@@ -1375,21 +1351,38 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
if (dictionary->IsKey(k)) {
Object* target = dictionary->ValueAt(i);
// We assume that global objects can only have slow properties.
- Object* value = target->IsJSGlobalPropertyCell()
- ? JSGlobalPropertyCell::cast(target)->value()
+ Object* value = target->IsPropertyCell()
+ ? PropertyCell::cast(target)->value()
: target;
- if (k != heap_->hidden_string()) {
- SetPropertyReference(js_obj, entry, String::cast(k), value);
- } else {
+ if (k == heap_->hidden_string()) {
TagObject(value, "(hidden properties)");
SetInternalReference(js_obj, entry, "hidden_properties", value);
+ continue;
}
+ if (ExtractAccessorPairProperty(js_obj, entry, k, value)) continue;
+ SetPropertyReference(js_obj, entry, String::cast(k), value);
}
}
}
}
+bool V8HeapExplorer::ExtractAccessorPairProperty(
+ JSObject* js_obj, int entry, Object* key, Object* callback_obj) {
+ if (!callback_obj->IsAccessorPair()) return false;
+ AccessorPair* accessors = AccessorPair::cast(callback_obj);
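+ // An absent accessor is represented by an oddball (e.g. the hole), which is
+ // not interesting as a property reference.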
+ Object* getter = accessors->getter();
+ if (!getter->IsOddball()) {
+ SetPropertyReference(js_obj, entry, String::cast(key), getter, "get %s");
+ }
+ Object* setter = accessors->setter();
+ if (!setter->IsOddball()) {
+ SetPropertyReference(js_obj, entry, String::cast(key), setter, "set %s");
+ }
+ return true;
+}
+
+
void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
if (js_obj->HasFastObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
@@ -1562,6 +1555,7 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
&& object != heap_->empty_fixed_array()
&& object != heap_->empty_descriptor_array()
&& object != heap_->fixed_array_map()
+ && object != heap_->cell_map()
&& object != heap_->global_property_cell_map()
&& object != heap_->shared_function_info_map()
&& object != heap_->free_space_map()
@@ -1748,6 +1742,22 @@ void V8HeapExplorer::SetGcSubrootReference(
snapshot_->gc_subroot(tag)->index(),
child_entry);
}
+
+ // Add a shortcut to JS global object reference at snapshot root.
+ if (child_obj->IsNativeContext()) {
+ Context* context = Context::cast(child_obj);
+ GlobalObject* global = context->global_object();
+ if (global->IsJSGlobalObject()) {
+ bool is_debug_object = false;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ is_debug_object = heap_->isolate()->debug()->IsDebugGlobal(global);
+#endif
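+ // Register each user global only once, and never the debugger's global.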
+ if (!is_debug_object && !user_roots_.Contains(global)) {
+ user_roots_.Insert(global);
+ SetUserGlobalReference(global);
+ }
+ }
+ }
}
}
@@ -2210,6 +2220,8 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
CHECK(!debug_heap->code_space()->was_swept_conservatively());
CHECK(!debug_heap->cell_space()->was_swept_conservatively());
+ CHECK(!debug_heap->property_cell_space()->
+ was_swept_conservatively());
CHECK(!debug_heap->map_space()->was_swept_conservatively());
#endif
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index 223b240b8f..cd1ec29242 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -28,6 +28,8 @@
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_HEAP_SNAPSHOT_GENERATOR_H_
+#include "profile-generator-inl.h"
+
namespace v8 {
namespace internal {
@@ -301,7 +303,6 @@ class HeapSnapshotsCollection {
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- HeapSnapshot* GetSnapshot(unsigned uid);
void RemoveSnapshot(HeapSnapshot* snapshot);
StringsStorage* names() { return &names_; }
@@ -321,14 +322,8 @@ class HeapSnapshotsCollection {
size_t GetUsedMemorySize() const;
private:
- INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
- // Mapping from snapshots' uids to HeapSnapshot* pointers.
- HashMap snapshots_uids_;
StringsStorage names_;
TokenEnumerator* token_enumerator_;
// Mapping from HeapObject addresses to objects' uids.
@@ -454,7 +449,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
- void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
+ void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
void ExtractJSObjectReferences(int entry, JSObject* js_obj);
void ExtractStringReferences(int entry, String* obj);
void ExtractContextReferences(int entry, Context* context);
@@ -462,12 +457,15 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractSharedFunctionInfoReferences(int entry,
SharedFunctionInfo* shared);
void ExtractScriptReferences(int entry, Script* script);
+ void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
- void ExtractJSGlobalPropertyCellReferences(int entry,
- JSGlobalPropertyCell* cell);
+ void ExtractCellReferences(int entry, Cell* cell);
+ void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
+ bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
+ Object* key, Object* callback_obj);
void ExtractElementReferences(JSObject* js_obj, int entry);
void ExtractInternalReferences(JSObject* js_obj, int entry);
bool IsEssentialObject(Object* object);
@@ -529,6 +527,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
SnapshotFillerInterface* filler_;
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
+ HeapObjectsSet user_roots_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
static HeapObject* const kGcRootsObject;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 2817fcba58..9d1ac8c1ed 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -32,6 +32,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
@@ -66,7 +67,7 @@ Heap::Heap()
: isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#define LUMP_OF_MEMORY (2 * MB)
code_range_size_(512*MB),
#else
@@ -105,6 +106,7 @@ Heap::Heap()
code_space_(NULL),
map_space_(NULL),
cell_space_(NULL),
+ property_cell_space_(NULL),
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
@@ -151,7 +153,6 @@ Heap::Heap()
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
mark_sweeps_since_idle_round_started_(0),
- ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
gcs_since_last_deopt_(0),
@@ -199,7 +200,8 @@ intptr_t Heap::Capacity() {
old_data_space_->Capacity() +
code_space_->Capacity() +
map_space_->Capacity() +
- cell_space_->Capacity();
+ cell_space_->Capacity() +
+ property_cell_space_->Capacity();
}
@@ -212,6 +214,7 @@ intptr_t Heap::CommittedMemory() {
code_space_->CommittedMemory() +
map_space_->CommittedMemory() +
cell_space_->CommittedMemory() +
+ property_cell_space_->CommittedMemory() +
lo_space_->Size();
}
@@ -225,6 +228,7 @@ size_t Heap::CommittedPhysicalMemory() {
code_space_->CommittedPhysicalMemory() +
map_space_->CommittedPhysicalMemory() +
cell_space_->CommittedPhysicalMemory() +
+ property_cell_space_->CommittedPhysicalMemory() +
lo_space_->CommittedPhysicalMemory();
}
@@ -244,7 +248,8 @@ intptr_t Heap::Available() {
old_data_space_->Available() +
code_space_->Available() +
map_space_->Available() +
- cell_space_->Available();
+ cell_space_->Available() +
+ property_cell_space_->Available();
}
@@ -254,6 +259,7 @@ bool Heap::HasBeenSetUp() {
code_space_ != NULL &&
map_space_ != NULL &&
cell_space_ != NULL &&
+ property_cell_space_ != NULL &&
lo_space_ != NULL;
}
@@ -383,6 +389,12 @@ void Heap::PrintShortHeapStatistics() {
cell_space_->SizeOfObjects() / KB,
cell_space_->Available() / KB,
cell_space_->CommittedMemory() / KB);
+ PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ property_cell_space_->SizeOfObjects() / KB,
+ property_cell_space_->Available() / KB,
+ property_cell_space_->CommittedMemory() / KB);
PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
", available: %6" V8_PTR_PREFIX "d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
@@ -395,6 +407,8 @@ void Heap::PrintShortHeapStatistics() {
this->SizeOfObjects() / KB,
this->Available() / KB,
this->CommittedMemory() / KB);
+ PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
+ amount_of_external_allocated_memory_ / KB);
PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
@@ -514,6 +528,10 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->heap_fraction_cell_space()->AddSample(
static_cast<int>(
(cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_property_cell_space()->
+ AddSample(static_cast<int>(
+ (property_cell_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
@@ -523,6 +541,10 @@ void Heap::GarbageCollectionEpilogue() {
static_cast<int>(map_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
static_cast<int>(cell_space()->CommittedMemory() / KB));
+ isolate_->counters()->
+ heap_sample_property_cell_space_committed()->
+ AddSample(static_cast<int>(
+ property_cell_space()->CommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
@@ -548,6 +570,7 @@ void Heap::GarbageCollectionEpilogue() {
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
@@ -1212,7 +1235,7 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
- store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
+ store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
@@ -1353,15 +1376,31 @@ void Heap::Scavenge() {
store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
}
- // Copy objects reachable from cells by scavenging cell values directly.
+ // Copy objects reachable from simple cells by scavenging cell values
+ // directly.
HeapObjectIterator cell_iterator(cell_space_);
for (HeapObject* heap_object = cell_iterator.Next();
heap_object != NULL;
heap_object = cell_iterator.Next()) {
- if (heap_object->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
+ if (heap_object->IsCell()) {
+ Cell* cell = Cell::cast(heap_object);
+ Address value_address = cell->ValueAddress();
+ scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ }
+ }
+
+ // Copy objects reachable from global property cells by scavenging global
+ // property cell values directly.
+ HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
+ for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
+ heap_object != NULL;
+ heap_object = js_global_property_cell_iterator.Next()) {
+ if (heap_object->IsPropertyCell()) {
+ PropertyCell* cell = PropertyCell::cast(heap_object);
Address value_address = cell->ValueAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
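+ // A property cell's type may also reference new-space objects, so scavenge
+ // it along with the value.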
+ Address type_address = cell->TypeAddress();
+ scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
}
}
@@ -1490,55 +1529,129 @@ void Heap::UpdateReferencesInExternalStringTable(
}
-static Object* ProcessFunctionWeakReferences(Heap* heap,
- Object* function,
- WeakObjectRetainer* retainer,
- bool record_slots) {
+template <class T>
+struct WeakListVisitor;
+
+
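+// A WeakListVisitor<T> specialization supplies the list plumbing for T
+// (SetWeakNext/WeakNext/WeakNextOffset) plus two hooks: VisitLiveObject for
+// every element the retainer keeps and VisitPhantomObject for every element
+// it drops.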
+template <class T>
+static Object* VisitWeakList(Heap* heap,
+ Object* list,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
- JSFunction* tail = NULL;
- Object* candidate = function;
- while (candidate != undefined) {
+ T* tail = NULL;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ while (list != undefined) {
// Check whether to keep the candidate in the list.
- JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
+ T* candidate = reinterpret_cast<T*>(list);
+ Object* retained = retainer->RetainAs(list);
+ if (retained != NULL) {
if (head == undefined) {
// First element in the list.
- head = retain;
+ head = retained;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_next_function_link(retain);
+ WeakListVisitor<T>::SetWeakNext(tail, retained);
if (record_slots) {
- Object** next_function =
- HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
- heap->mark_compact_collector()->RecordSlot(
- next_function, next_function, retain);
+ Object** next_slot =
+ HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
+ collector->RecordSlot(next_slot, next_slot, retained);
}
}
- // Retained function is new tail.
- candidate_function = reinterpret_cast<JSFunction*>(retain);
- tail = candidate_function;
+ // Retained object is new tail.
+ ASSERT(!retained->IsUndefined());
+ candidate = reinterpret_cast<T*>(retained);
+ tail = candidate;
- ASSERT(retain->IsUndefined() || retain->IsJSFunction());
- if (retain == undefined) break;
+ // tail is a live object; visit it.
+ WeakListVisitor<T>::VisitLiveObject(
+ heap, tail, retainer, record_slots);
+ } else {
+ WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
}
// Move to next element in the list.
- candidate = candidate_function->next_function_link();
+ list = WeakListVisitor<T>::WeakNext(candidate);
}
// Terminate the list if there is at least one element.
if (tail != NULL) {
- tail->set_next_function_link(undefined);
+ WeakListVisitor<T>::SetWeakNext(tail, undefined);
}
-
return head;
}
+template<>
+struct WeakListVisitor<JSFunction> {
+ static void SetWeakNext(JSFunction* function, Object* next) {
+ function->set_next_function_link(next);
+ }
+
+ static Object* WeakNext(JSFunction* function) {
+ return function->next_function_link();
+ }
+
+ static int WeakNextOffset() {
+ return JSFunction::kNextFunctionLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, JSFunction*,
+ WeakObjectRetainer*, bool) {
+ }
+
+ static void VisitPhantomObject(Heap*, JSFunction*) {
+ }
+};
+
+
+template<>
+struct WeakListVisitor<Context> {
+ static void SetWeakNext(Context* context, Object* next) {
+ context->set(Context::NEXT_CONTEXT_LINK,
+ next,
+ UPDATE_WRITE_BARRIER);
+ }
+
+ static Object* WeakNext(Context* context) {
+ return context->get(Context::NEXT_CONTEXT_LINK);
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
+ // Process the weak list of optimized functions for the context.
+ Object* function_list_head =
+ VisitWeakList<JSFunction>(
+ heap,
+ context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
+ retainer,
+ record_slots);
+ context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
+ function_list_head,
+ UPDATE_WRITE_BARRIER);
+ if (record_slots) {
+ Object** optimized_functions =
+ HeapObject::RawField(
+ context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
+ heap->mark_compact_collector()->RecordSlot(
+ optimized_functions, optimized_functions, function_list_head);
+ }
+ }
+
+ static void VisitPhantomObject(Heap*, Context*) {
+ }
+
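+ // Contexts are FixedArrays, so the byte offset of the weak link is derived
+ // from its slot index.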
+ static int WeakNextOffset() {
+ return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+ }
+};
+
+
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete mark-compact cycle.
@@ -1553,173 +1666,95 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
bool record_slots) {
- Object* undefined = undefined_value();
- Object* head = undefined;
- Context* tail = NULL;
- Object* candidate = native_contexts_list_;
-
- while (candidate != undefined) {
- // Check whether to keep the candidate in the list.
- Context* candidate_context = reinterpret_cast<Context*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
- if (head == undefined) {
- // First element in the list.
- head = retain;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- retain,
- UPDATE_WRITE_BARRIER);
-
- if (record_slots) {
- Object** next_context =
- HeapObject::RawField(
- tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
- mark_compact_collector()->RecordSlot(
- next_context, next_context, retain);
- }
- }
- // Retained context is new tail.
- candidate_context = reinterpret_cast<Context*>(retain);
- tail = candidate_context;
-
- if (retain == undefined) break;
-
- // Process the weak list of optimized functions for the context.
- Object* function_list_head =
- ProcessFunctionWeakReferences(
- this,
- candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer,
- record_slots);
- candidate_context->set_unchecked(this,
- Context::OPTIMIZED_FUNCTIONS_LIST,
- function_list_head,
- UPDATE_WRITE_BARRIER);
- if (record_slots) {
- Object** optimized_functions =
- HeapObject::RawField(
- tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
- mark_compact_collector()->RecordSlot(
- optimized_functions, optimized_functions, function_list_head);
- }
- }
-
- // Move to next element in the list.
- candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
- }
-
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- Heap::undefined_value(),
- UPDATE_WRITE_BARRIER);
- }
-
+ Object* head =
+ VisitWeakList<Context>(
+ this, native_contexts_list(), retainer, record_slots);
// Update the head of the list of contexts.
native_contexts_list_ = head;
}
-template <class T>
-struct WeakListVisitor;
-
-
-template <class T>
-static Object* VisitWeakList(Object* list,
- MarkCompactCollector* collector,
- WeakObjectRetainer* retainer, bool record_slots) {
- Object* head = Smi::FromInt(0);
- T* tail = NULL;
- while (list != Smi::FromInt(0)) {
- Object* retained = retainer->RetainAs(list);
- if (retained != NULL) {
- if (head == Smi::FromInt(0)) {
- head = retained;
- } else {
- ASSERT(tail != NULL);
- WeakListVisitor<T>::set_weak_next(tail, retained);
- if (record_slots) {
- Object** next_slot =
- HeapObject::RawField(tail, WeakListVisitor<T>::kWeakNextOffset);
- collector->RecordSlot(next_slot, next_slot, retained);
- }
- }
- tail = reinterpret_cast<T*>(retained);
- WeakListVisitor<T>::VisitLiveObject(
- tail, collector, retainer, record_slots);
- }
- list = WeakListVisitor<T>::get_weak_next(reinterpret_cast<T*>(list));
- }
- if (tail != NULL) {
- tail->set_weak_next(Smi::FromInt(0));
- }
- return head;
-}
-
-
template<>
-struct WeakListVisitor<JSTypedArray> {
- static void set_weak_next(JSTypedArray* obj, Object* next) {
+struct WeakListVisitor<JSArrayBufferView> {
+ static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
obj->set_weak_next(next);
}
- static Object* get_weak_next(JSTypedArray* obj) {
+ static Object* WeakNext(JSArrayBufferView* obj) {
return obj->weak_next();
}
- static void VisitLiveObject(JSTypedArray* obj,
- MarkCompactCollector* collector,
+ static void VisitLiveObject(Heap*,
+ JSArrayBufferView* obj,
WeakObjectRetainer* retainer,
bool record_slots) {}
- static const int kWeakNextOffset = JSTypedArray::kWeakNextOffset;
+ static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
+
+ static int WeakNextOffset() {
+ return JSArrayBufferView::kWeakNextOffset;
+ }
};
template<>
struct WeakListVisitor<JSArrayBuffer> {
- static void set_weak_next(JSArrayBuffer* obj, Object* next) {
+ static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
obj->set_weak_next(next);
}
- static Object* get_weak_next(JSArrayBuffer* obj) {
+ static Object* WeakNext(JSArrayBuffer* obj) {
return obj->weak_next();
}
- static void VisitLiveObject(JSArrayBuffer* array_buffer,
- MarkCompactCollector* collector,
+ static void VisitLiveObject(Heap* heap,
+ JSArrayBuffer* array_buffer,
WeakObjectRetainer* retainer,
bool record_slots) {
Object* typed_array_obj =
- VisitWeakList<JSTypedArray>(array_buffer->weak_first_array(),
- collector, retainer, record_slots);
- array_buffer->set_weak_first_array(typed_array_obj);
- if (typed_array_obj != Smi::FromInt(0) && record_slots) {
+ VisitWeakList<JSArrayBufferView>(
+ heap,
+ array_buffer->weak_first_view(),
+ retainer, record_slots);
+ array_buffer->set_weak_first_view(typed_array_obj);
+ if (typed_array_obj != heap->undefined_value() && record_slots) {
Object** slot = HeapObject::RawField(
- array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
- collector->RecordSlot(slot, slot, typed_array_obj);
+ array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
}
}
- static const int kWeakNextOffset = JSArrayBuffer::kWeakNextOffset;
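+ // An array buffer that is no longer retained releases its external backing
+ // store.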
+ static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
+ Runtime::FreeArrayBuffer(heap->isolate(), phantom);
+ }
+
+ static int WeakNextOffset() {
+ return JSArrayBuffer::kWeakNextOffset;
+ }
};
void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
bool record_slots) {
Object* array_buffer_obj =
- VisitWeakList<JSArrayBuffer>(array_buffers_list(),
- mark_compact_collector(),
+ VisitWeakList<JSArrayBuffer>(this,
+ array_buffers_list(),
retainer, record_slots);
set_array_buffers_list(array_buffer_obj);
}
+void Heap::TearDownArrayBuffers() {
+ Object* undefined = undefined_value();
+ for (Object* o = array_buffers_list(); o != undefined;) {
+ JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
+ Runtime::FreeArrayBuffer(isolate(), buffer);
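+ // Freeing the buffer releases only its external backing store; the heap
+ // object itself, including weak_next, is still readable here.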
+ o = buffer->weak_next();
+ }
+ array_buffers_list_ = undefined;
+}
+
+
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
@@ -1903,6 +1938,10 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
+ table_.Register(kVisitJSDataView,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -2663,8 +2702,13 @@ bool Heap::CreateInitialMaps() {
}
set_code_map(Map::cast(obj));
- { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
- JSGlobalPropertyCell::kSize);
+ { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_cell_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
+ PropertyCell::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_global_property_cell_map(Map::cast(obj));
@@ -2798,14 +2842,29 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
}
-MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
+MaybeObject* Heap::AllocateCell(Object* value) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawCell();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
+ HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
+ Cell::cast(result)->set_value(value);
+ return result;
+}
+
+
+MaybeObject* Heap::AllocatePropertyCell(Object* value) {
+ Object* result;
+ { MaybeObject* maybe_result = AllocateRawPropertyCell();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
HeapObject::cast(result)->set_map_no_write_barrier(
global_property_cell_map());
- JSGlobalPropertyCell::cast(result)->set_value(value);
+ PropertyCell* cell = PropertyCell::cast(result);
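+ // Fresh property cells start with no dependent code and no type information.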
+ cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ cell->set_value(value);
+ cell->set_type(Type::None());
return result;
}
@@ -3100,7 +3159,7 @@ bool Heap::CreateInitialObjects() {
set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
// Handling of script id generation is in Factory::NewScript.
- set_last_script_id(undefined_value());
+ set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -4370,7 +4429,10 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
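+ // This is the inlined body of Map::InitialPropertiesLength().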
+ int prop_size =
+ map->pre_allocated_property_fields() +
+ map->unused_property_fields() -
+ map->inobject_properties();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
@@ -4407,7 +4469,10 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
+ int prop_size =
+ map->pre_allocated_property_fields() +
+ map->unused_property_fields() -
+ map->inobject_properties();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
@@ -4469,8 +4534,7 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
// advice
Map* initial_map = constructor->initial_map();
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
- *allocation_site_info_payload);
+ Cell* cell = Cell::cast(*allocation_site_info_payload);
Smi* smi = Smi::cast(cell->value());
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
@@ -4699,7 +4763,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
- // require us to change property values to JSGlobalPropertyCells.
+ // require us to change property values to PropertyCells.
ASSERT(map->NextFreePropertyIndex() == 0);
// Make sure we don't have a ton of pre-allocated slots in the
@@ -4728,7 +4792,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
Object* value = descs->GetCallbacksObject(i);
- MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
+ MaybeObject* maybe_value = AllocatePropertyCell(value);
if (!maybe_value->ToObject(&value)) return maybe_value;
MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
@@ -5833,6 +5897,7 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
uncommit = true;
}
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ mark_sweeps_since_idle_round_started_++;
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
new_space_.Shrink();
@@ -5846,6 +5911,7 @@ bool Heap::IdleNotification(int hint) {
// Hints greater than this value indicate that
// the embedder is requesting a lot of GC work.
const int kMaxHint = 1000;
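+ // Minimal hint that makes starting or advancing incremental marking
+ // worthwhile.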
+ const int kMinHintForIncrementalMarking = 10;
// Minimal hint that allows a full GC to be performed.
const int kMinHintForFullGC = 100;
intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
@@ -5908,18 +5974,9 @@ bool Heap::IdleNotification(int hint) {
}
}
- int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
- mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
- ms_count_at_last_idle_notification_ = ms_count_;
-
int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
mark_sweeps_since_idle_round_started_;
- if (remaining_mark_sweeps <= 0) {
- FinishIdleRound();
- return true;
- }
-
if (incremental_marking()->IsStopped()) {
// If there are no more than two GCs left in this idle round and we are
// allowed to do a full GC, then make those GCs full in order to compact
@@ -5929,13 +5986,21 @@ bool Heap::IdleNotification(int hint) {
if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: finalize idle round");
- } else {
+ mark_sweeps_since_idle_round_started_++;
+ } else if (hint > kMinHintForIncrementalMarking) {
incremental_marking()->Start();
}
}
- if (!incremental_marking()->IsStopped()) {
+ if (!incremental_marking()->IsStopped() &&
+ hint > kMinHintForIncrementalMarking) {
AdvanceIdleIncrementalMarking(step_size);
}
+
+ if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ FinishIdleRound();
+ return true;
+ }
+
return false;
}
@@ -6052,6 +6117,8 @@ void Heap::ReportHeapStatistics(const char* title) {
map_space_->ReportStatistics();
PrintF("Cell space : ");
cell_space_->ReportStatistics();
+ PrintF("PropertyCell space : ");
+ property_cell_space_->ReportStatistics();
PrintF("Large object space : ");
lo_space_->ReportStatistics();
PrintF(">>>>>> ========================================= >>>>>>\n");
@@ -6073,6 +6140,7 @@ bool Heap::Contains(Address addr) {
code_space_->Contains(addr) ||
map_space_->Contains(addr) ||
cell_space_->Contains(addr) ||
+ property_cell_space_->Contains(addr) ||
lo_space_->SlowContains(addr));
}
@@ -6099,6 +6167,8 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
return map_space_->Contains(addr);
case CELL_SPACE:
return cell_space_->Contains(addr);
+ case PROPERTY_CELL_SPACE:
+ return property_cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
}
@@ -6125,6 +6195,7 @@ void Heap::Verify() {
old_data_space_->Verify(&no_dirty_regions_visitor);
code_space_->Verify(&no_dirty_regions_visitor);
cell_space_->Verify(&no_dirty_regions_visitor);
+ property_cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
@@ -6588,7 +6659,12 @@ bool Heap::ConfigureHeap(int max_semispace_size,
max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- external_allocation_limit_ = 16 * max_semispace_size_;
+
+ // The external allocation limit should be below 256 MB on all architectures
+ // to avoid unnecessary low memory notifications, as that is the threshold
+ // for some embedders.
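+ // For example, a 16 MB maximum semispace yields 12 * 16 MB = 192 MB,
+ // whereas the previous factor of 16 would have hit 256 MB exactly.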
+ external_allocation_limit_ = 12 * max_semispace_size_;
+ ASSERT(external_allocation_limit_ <= 256 * MB);
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
@@ -6624,6 +6700,8 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->map_space_capacity = map_space_->Capacity();
*stats->cell_space_size = cell_space_->SizeOfObjects();
*stats->cell_space_capacity = cell_space_->Capacity();
+ *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
+ *stats->property_cell_space_capacity = property_cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
*stats->memory_allocator_size = isolate()->memory_allocator()->Size();
@@ -6652,6 +6730,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
+ code_space_->SizeOfObjects()
+ map_space_->SizeOfObjects()
+ cell_space_->SizeOfObjects()
+ + property_cell_space_->SizeOfObjects()
+ lo_space_->SizeOfObjects();
}
@@ -6740,11 +6819,17 @@ bool Heap::SetUp() {
if (map_space_ == NULL) return false;
if (!map_space_->SetUp()) return false;
- // Initialize global property cell space.
+ // Initialize simple cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
if (!cell_space_->SetUp()) return false;
+ // Initialize global property cell space.
+ property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
+ PROPERTY_CELL_SPACE);
+ if (property_cell_space_ == NULL) return false;
+ if (!property_cell_space_->SetUp()) return false;
+
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
@@ -6785,6 +6870,7 @@ bool Heap::CreateHeapObjects() {
if (!CreateInitialObjects()) return false;
native_contexts_list_ = undefined_value();
+ array_buffers_list_ = undefined_value();
return true;
}
@@ -6827,6 +6913,8 @@ void Heap::TearDown() {
PrintF("\n\n");
}
+ TearDownArrayBuffers();
+
isolate_->global_handles()->TearDown();
external_string_table_.TearDown();
@@ -6865,6 +6953,12 @@ void Heap::TearDown() {
cell_space_ = NULL;
}
+ if (property_cell_space_ != NULL) {
+ property_cell_space_->TearDown();
+ delete property_cell_space_;
+ property_cell_space_ = NULL;
+ }
+
if (lo_space_ != NULL) {
lo_space_->TearDown();
delete lo_space_;
@@ -6955,6 +7049,8 @@ Space* AllSpaces::next() {
return heap_->map_space();
case CELL_SPACE:
return heap_->cell_space();
+ case PROPERTY_CELL_SPACE:
+ return heap_->property_cell_space();
case LO_SPACE:
return heap_->lo_space();
default:
@@ -6975,6 +7071,8 @@ PagedSpace* PagedSpaces::next() {
return heap_->map_space();
case CELL_SPACE:
return heap_->cell_space();
+ case PROPERTY_CELL_SPACE:
+ return heap_->property_cell_space();
default:
return NULL;
}
@@ -7064,6 +7162,10 @@ ObjectIterator* SpaceIterator::CreateIterator() {
case CELL_SPACE:
iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
break;
+ case PROPERTY_CELL_SPACE:
+ iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
+ size_func_);
+ break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
break;
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index da10efcee5..d254b607b6 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -60,6 +60,7 @@ namespace internal {
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Oddball, uninitialized_value, UninitializedValue) \
+ V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
@@ -174,7 +175,7 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Object, last_script_id, LastScriptId) \
+ V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
@@ -588,6 +589,9 @@ class Heap {
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
+ PropertyCellSpace* property_cell_space() {
+ return property_cell_space_;
+ }
LargeObjectSpace* lo_space() { return lo_space_; }
PagedSpace* paged_space(int idx) {
switch (idx) {
@@ -599,6 +603,8 @@ class Heap {
return map_space();
case CELL_SPACE:
return cell_space();
+ case PROPERTY_CELL_SPACE:
+ return property_cell_space();
case CODE_SPACE:
return code_space();
case NEW_SPACE:
@@ -933,11 +939,17 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateSymbol();
+ // Allocate a tenured simple cell.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
+
// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
+ MUST_USE_RESULT MaybeObject* AllocatePropertyCell(Object* value);
// Allocate Box.
MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
@@ -1428,9 +1440,6 @@ class Heap {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
- // Update the next script id.
- inline void SetLastScriptId(Object* last_script_id);
-
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
@@ -1861,7 +1870,7 @@ class Heap {
enum {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
+ FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
OBJECT_STATS_COUNT =
FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
};
@@ -1873,7 +1882,7 @@ class Heap {
object_sizes_[type] += size;
} else {
if (type == CODE_TYPE) {
- ASSERT(sub_type <= Code::LAST_CODE_KIND);
+ ASSERT(sub_type < Code::NUMBER_OF_KINDS);
object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
} else if (type == FIXED_ARRAY_TYPE) {
@@ -1946,7 +1955,7 @@ class Heap {
int scan_on_scavenge_pages_;
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
static const int kMaxObjectSizeInNewSpace = 512*KB;
@@ -1958,6 +1967,7 @@ class Heap {
OldSpace* code_space_;
MapSpace* map_space_;
CellSpace* cell_space_;
+ PropertyCellSpace* property_cell_space_;
LargeObjectSpace* lo_space_;
HeapState gc_state_;
int gc_post_processing_depth_;
@@ -2114,9 +2124,12 @@ class Heap {
// (since both AllocateRaw and AllocateRawMap are inlined).
MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
- // Allocate an uninitialized object in the global property cell space.
+ // Allocate an uninitialized object in the simple cell space.
MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
+ // Allocate an uninitialized object in the global property cell space.
+ MUST_USE_RESULT inline MaybeObject* AllocateRawPropertyCell();
+
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
@@ -2176,6 +2189,9 @@ class Heap {
void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
+ // Called on heap tear-down.
+ void TearDownArrayBuffers();
+
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
@@ -2277,7 +2293,6 @@ class Heap {
void StartIdleRound() {
mark_sweeps_since_idle_round_started_ = 0;
- ms_count_at_last_idle_notification_ = ms_count_;
}
void FinishIdleRound() {
@@ -2354,7 +2369,6 @@ class Heap {
bool last_idle_notification_gc_count_init_;
int mark_sweeps_since_idle_round_started_;
- int ms_count_at_last_idle_notification_;
unsigned int gc_count_at_last_idle_gc_;
int scavenges_since_last_idle_round_;
@@ -2437,6 +2451,8 @@ class HeapStats {
int* size_per_type; // 22
int* os_error; // 23
int* end_marker; // 24
+ intptr_t* property_cell_space_size; // 25
+ intptr_t* property_cell_space_capacity; // 26
};
@@ -2936,6 +2952,10 @@ class TranscendentalCache {
for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
}
+ ~TranscendentalCache() {
+ for (int i = 0; i < kNumberOfCaches; ++i) delete caches_[i];
+ }
+
// Used to create an external reference.
inline Address cache_array_address();
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/hydrogen-environment-liveness.cc
index 8c660597ab..20e680c145 100644
--- a/deps/v8/src/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/hydrogen-environment-liveness.cc
@@ -33,65 +33,56 @@ namespace v8 {
namespace internal {
-EnvironmentSlotLivenessAnalyzer::EnvironmentSlotLivenessAnalyzer(
+HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase(
HGraph* graph)
- : graph_(graph),
- zone_(graph->isolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT),
+ : HPhase("H_Environment liveness analysis", graph),
block_count_(graph->blocks()->length()),
maximum_environment_size_(graph->maximum_environment_size()),
+ live_at_block_start_(block_count_, zone()),
+ first_simulate_(block_count_, zone()),
+ first_simulate_invalid_for_index_(block_count_, zone()),
+ markers_(maximum_environment_size_, zone()),
collect_markers_(true),
- last_simulate_(NULL) {
- if (maximum_environment_size_ == 0) return;
-
- live_at_block_start_ =
- new(zone()) ZoneList<BitVector*>(block_count_, zone());
- first_simulate_ = new(zone()) ZoneList<HSimulate*>(block_count_, zone());
- first_simulate_invalid_for_index_ =
- new(zone()) ZoneList<BitVector*>(block_count_, zone());
- markers_ = new(zone())
- ZoneList<HEnvironmentMarker*>(maximum_environment_size_, zone());
- went_live_since_last_simulate_ =
- new(zone()) BitVector(maximum_environment_size_, zone());
-
+ last_simulate_(NULL),
+ went_live_since_last_simulate_(maximum_environment_size_, zone()) {
+ ASSERT(maximum_environment_size_ > 0);
for (int i = 0; i < block_count_; ++i) {
- live_at_block_start_->Add(
+ live_at_block_start_.Add(
new(zone()) BitVector(maximum_environment_size_, zone()), zone());
- first_simulate_->Add(NULL, zone());
- first_simulate_invalid_for_index_->Add(
+ first_simulate_.Add(NULL, zone());
+ first_simulate_invalid_for_index_.Add(
new(zone()) BitVector(maximum_environment_size_, zone()), zone());
}
}
-void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlot(int index,
- HSimulate* simulate) {
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
+ int index, HSimulate* simulate) {
int operand_index = simulate->ToOperandIndex(index);
if (operand_index == -1) {
- simulate->AddAssignedValue(index, graph_->GetConstantUndefined());
+ simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
} else {
- simulate->SetOperandAt(operand_index, graph_->GetConstantUndefined());
+ simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
}
}
-void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsInSuccessors(
- HBasicBlock* block,
- BitVector* live) {
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
+ HBasicBlock* block, BitVector* live) {
// When a value is live in successor A but dead in B, we must
// explicitly zap it in B.
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
HBasicBlock* successor = it.Current();
int successor_id = successor->block_id();
- BitVector* live_in_successor = live_at_block_start_->at(successor_id);
+ BitVector* live_in_successor = live_at_block_start_[successor_id];
if (live_in_successor->Equals(*live)) continue;
for (int i = 0; i < live->length(); ++i) {
if (!live->Contains(i)) continue;
if (live_in_successor->Contains(i)) continue;
- if (first_simulate_invalid_for_index_->at(successor_id)->Contains(i)) {
+ if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) {
continue;
}
- HSimulate* simulate = first_simulate_->at(successor_id);
+ HSimulate* simulate = first_simulate_.at(successor_id);
if (simulate == NULL) continue;
ASSERT(simulate->closure().is_identical_to(
block->last_environment()->closure()));
@@ -101,7 +92,7 @@ void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsInSuccessors(
}
-void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsForInstruction(
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
HEnvironmentMarker* marker) {
if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
HSimulate* simulate = marker->next_simulate();
@@ -112,18 +103,18 @@ void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsForInstruction(
}
-void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtBlockEnd(
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd(
HBasicBlock* block,
BitVector* live) {
// Liveness at the end of each block: union of liveness in successors.
live->Clear();
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- live->Union(*live_at_block_start_->at(it.Current()->block_id()));
+ live->Union(*live_at_block_start_[it.Current()->block_id()]);
}
}
-void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
HInstruction* instr,
BitVector* live) {
switch (instr->opcode()) {
@@ -135,7 +126,7 @@ void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
} else {
marker->ClearFlag(HValue::kEndsLiveRange);
}
- if (!went_live_since_last_simulate_->Contains(index)) {
+ if (!went_live_since_last_simulate_.Contains(index)) {
marker->set_next_simulate(last_simulate_);
}
if (marker->kind() == HEnvironmentMarker::LOOKUP) {
@@ -143,11 +134,11 @@ void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
} else {
ASSERT(marker->kind() == HEnvironmentMarker::BIND);
live->Remove(index);
- went_live_since_last_simulate_->Add(index);
+ went_live_since_last_simulate_.Add(index);
}
if (collect_markers_) {
// Populate |markers_| list during the first pass.
- markers_->Add(marker, &zone_);
+ markers_.Add(marker, zone());
}
break;
}
@@ -174,8 +165,8 @@ void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
int return_id = enter->return_targets()->at(i)->block_id();
// When an AbnormalExit is involved, it can happen that the return
// target block doesn't actually exist.
- if (return_id < live_at_block_start_->length()) {
- live->Union(*live_at_block_start_->at(return_id));
+ if (return_id < live_at_block_start_.length()) {
+ live->Union(*live_at_block_start_[return_id]);
}
}
last_simulate_ = NULL;
@@ -192,7 +183,7 @@ void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
}
case HValue::kSimulate:
last_simulate_ = HSimulate::cast(instr);
- went_live_since_last_simulate_->Clear();
+ went_live_since_last_simulate_.Clear();
break;
default:
break;
@@ -200,47 +191,46 @@ void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
}
-void EnvironmentSlotLivenessAnalyzer::AnalyzeAndTrim() {
- HPhase phase("H_EnvironmentLivenessAnalysis", graph_);
- if (maximum_environment_size_ == 0) return;
+void HEnvironmentLivenessAnalysisPhase::Run() {
+ ASSERT(maximum_environment_size_ > 0);
// Main iteration. Compute liveness of environment slots, and store it
// for each block until it doesn't change any more. For efficiency, visit
// blocks in reverse order and walk backwards through each block. We
// need several iterations to propagate liveness through nested loops.
- BitVector* live = new(zone()) BitVector(maximum_environment_size_, zone());
- BitVector* worklist = new(zone()) BitVector(block_count_, zone());
+ BitVector live(maximum_environment_size_, zone());
+ BitVector worklist(block_count_, zone());
for (int i = 0; i < block_count_; ++i) {
- worklist->Add(i);
+ worklist.Add(i);
}
- while (!worklist->IsEmpty()) {
+ while (!worklist.IsEmpty()) {
for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
- if (!worklist->Contains(block_id)) {
+ if (!worklist.Contains(block_id)) {
continue;
}
- worklist->Remove(block_id);
+ worklist.Remove(block_id);
last_simulate_ = NULL;
- HBasicBlock* block = graph_->blocks()->at(block_id);
- UpdateLivenessAtBlockEnd(block, live);
+ HBasicBlock* block = graph()->blocks()->at(block_id);
+ UpdateLivenessAtBlockEnd(block, &live);
for (HInstruction* instr = block->last(); instr != NULL;
instr = instr->previous()) {
- UpdateLivenessAtInstruction(instr, live);
+ UpdateLivenessAtInstruction(instr, &live);
}
// Reached the start of the block; do the necessary bookkeeping:
// store computed information for this block and add predecessors
// to the work list as necessary.
- first_simulate_->Set(block_id, last_simulate_);
- first_simulate_invalid_for_index_->at(block_id)->CopyFrom(
- *went_live_since_last_simulate_);
- if (live_at_block_start_->at(block_id)->UnionIsChanged(*live)) {
+ first_simulate_.Set(block_id, last_simulate_);
+ first_simulate_invalid_for_index_[block_id]->CopyFrom(
+ went_live_since_last_simulate_);
+ if (live_at_block_start_[block_id]->UnionIsChanged(live)) {
for (int i = 0; i < block->predecessors()->length(); ++i) {
- worklist->Add(block->predecessors()->at(i)->block_id());
+ worklist.Add(block->predecessors()->at(i)->block_id());
}
if (block->IsInlineReturnTarget()) {
- worklist->Add(block->inlined_entry_block()->block_id());
+ worklist.Add(block->inlined_entry_block()->block_id());
}
}
}
@@ -249,18 +239,18 @@ void EnvironmentSlotLivenessAnalyzer::AnalyzeAndTrim() {
}
// Analysis finished. Zap dead environment slots.
- for (int i = 0; i < markers_->length(); ++i) {
- ZapEnvironmentSlotsForInstruction(markers_->at(i));
+ for (int i = 0; i < markers_.length(); ++i) {
+ ZapEnvironmentSlotsForInstruction(markers_[i]);
}
for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- UpdateLivenessAtBlockEnd(block, live);
- ZapEnvironmentSlotsInSuccessors(block, live);
+ HBasicBlock* block = graph()->blocks()->at(block_id);
+ UpdateLivenessAtBlockEnd(block, &live);
+ ZapEnvironmentSlotsInSuccessors(block, &live);
}
// Finally, remove the HEnvironment{Bind,Lookup} markers.
- for (int i = 0; i < markers_->length(); ++i) {
- markers_->at(i)->DeleteAndReplaceWith(NULL);
+ for (int i = 0; i < markers_.length(); ++i) {
+ markers_[i]->DeleteAndReplaceWith(NULL);
}
}
diff --git a/deps/v8/src/hydrogen-environment-liveness.h b/deps/v8/src/hydrogen-environment-liveness.h
index 484e56d52e..248ec5ce5d 100644
--- a/deps/v8/src/hydrogen-environment-liveness.h
+++ b/deps/v8/src/hydrogen-environment-liveness.h
@@ -43,11 +43,11 @@ namespace internal {
// the last lookup that refers to them.
// Slots are identified by their index and only affected if whitelisted in
// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
-class EnvironmentSlotLivenessAnalyzer {
+class HEnvironmentLivenessAnalysisPhase : public HPhase {
public:
- explicit EnvironmentSlotLivenessAnalyzer(HGraph* graph);
+ explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph);
- void AnalyzeAndTrim();
+ void Run();
private:
void ZapEnvironmentSlot(int index, HSimulate* simulate);
@@ -56,14 +56,6 @@ class EnvironmentSlotLivenessAnalyzer {
void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
- Zone* zone() { return &zone_; }
-
- HGraph* graph_;
- // Use a dedicated Zone for this phase, with a ZoneScope to ensure it
- // gets freed.
- Zone zone_;
- ZoneScope zone_scope_;
-
int block_count_;
// Largest number of local variables in any environment in the graph
@@ -71,21 +63,23 @@ class EnvironmentSlotLivenessAnalyzer {
int maximum_environment_size_;
// Per-block data. All these lists are indexed by block_id.
- ZoneList<BitVector*>* live_at_block_start_;
- ZoneList<HSimulate*>* first_simulate_;
- ZoneList<BitVector*>* first_simulate_invalid_for_index_;
+ ZoneList<BitVector*> live_at_block_start_;
+ ZoneList<HSimulate*> first_simulate_;
+ ZoneList<BitVector*> first_simulate_invalid_for_index_;
// List of all HEnvironmentMarker instructions for quick iteration/deletion.
// It is populated during the first pass over the graph, controlled by
// |collect_markers_|.
- ZoneList<HEnvironmentMarker*>* markers_;
+ ZoneList<HEnvironmentMarker*> markers_;
bool collect_markers_;
// Keeps track of the last simulate seen, as well as the environment slots
// for which a new live range has started since then (such slots must not be
// zapped in that simulate when the end of another of their live ranges is
// found).
HSimulate* last_simulate_;
- BitVector* went_live_since_last_simulate_;
+ BitVector went_live_since_last_simulate_;
+
+ DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase);
};
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
new file mode 100644
index 0000000000..e852fb8d6d
--- /dev/null
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-escape-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+void HEscapeAnalysisPhase::CollectIfNoEscapingUses(HInstruction* instr) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (use->HasEscapingOperandAt(it.index())) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) escapes through #%d (%s) @%d\n", instr->id(),
+ instr->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return;
+ }
+ }
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) is being captured\n", instr->id(), instr->Mnemonic());
+ }
+ captured_.Add(instr, zone());
+}
+
+
+void HEscapeAnalysisPhase::CollectCapturedValues() {
+ int block_count = graph()->blocks()->length();
+ for (int i = 0; i < block_count; ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
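+ // Only allocation instructions are candidates for capture.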
+ if (instr->IsAllocate() || instr->IsAllocateObject()) {
+ CollectIfNoEscapingUses(instr);
+ }
+ }
+ }
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-escape-analysis.h b/deps/v8/src/hydrogen-escape-analysis.h
new file mode 100644
index 0000000000..6ba6e823c5
--- /dev/null
+++ b/deps/v8/src/hydrogen-escape-analysis.h
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+#define V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+
+#include "allocation.h"
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HEscapeAnalysisPhase : public HPhase {
+ public:
+ explicit HEscapeAnalysisPhase(HGraph* graph)
+ : HPhase("H_Escape analysis", graph), captured_(0, zone()) { }
+
+ void Run() {
+ CollectCapturedValues();
+ }
+
+ private:
+ void CollectCapturedValues();
+ void CollectIfNoEscapingUses(HInstruction* instr);
+
+ ZoneList<HValue*> captured_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_ESCAPE_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index aa2dff7655..7ea2f162bf 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -361,52 +361,49 @@ void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
}
-HGlobalValueNumberer::HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
- : graph_(graph),
- info_(info),
+HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
+ : HPhase("H_Global value numbering", graph),
removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), graph->zone()),
- loop_side_effects_(graph->blocks()->length(), graph->zone()),
- visited_on_paths_(graph->zone(), graph->blocks()->length()) {
+ block_side_effects_(graph->blocks()->length(), zone()),
+ loop_side_effects_(graph->blocks()->length(), zone()),
+ visited_on_paths_(zone(), graph->blocks()->length()) {
ASSERT(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
+ block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+ zone());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+ zone());
}
-bool HGlobalValueNumberer::Analyze() {
+void HGlobalValueNumberingPhase::Analyze() {
removed_side_effects_ = false;
ComputeBlockSideEffects();
if (FLAG_loop_invariant_code_motion) {
LoopInvariantCodeMotion();
}
AnalyzeGraph();
- return removed_side_effects_;
}
-void HGlobalValueNumberer::ComputeBlockSideEffects() {
+void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
// The Analyze phase of GVN can be called multiple times. Clear loop side
// effects before computing them to erase the contents from previous Analyze
// passes.
for (int i = 0; i < loop_side_effects_.length(); ++i) {
loop_side_effects_[i].RemoveAll();
}
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+ for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
- HBasicBlock* block = graph_->blocks()->at(i);
- HInstruction* instr = block->first();
+ HBasicBlock* block = graph()->blocks()->at(i);
int id = block->block_id();
GVNFlagSet side_effects;
- while (instr != NULL) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
side_effects.Add(instr->ChangesFlags());
if (instr->IsSoftDeoptimize()) {
block_side_effects_[id].RemoveAll();
side_effects.RemoveAll();
break;
}
- instr = instr->next();
}
block_side_effects_[id].Add(side_effects);
@@ -512,11 +509,11 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
}
-void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
- graph_->use_optimistic_licm() ? "yes" : "no");
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- HBasicBlock* block = graph_->blocks()->at(i);
+ graph()->use_optimistic_licm() ? "yes" : "no");
+ for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
@@ -527,7 +524,7 @@ void HGlobalValueNumberer::LoopInvariantCodeMotion() {
GVNFlagSet accumulated_first_time_changes;
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
+ ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects,
&accumulated_first_time_depends,
&accumulated_first_time_changes);
}
@@ -536,7 +533,7 @@ void HGlobalValueNumberer::LoopInvariantCodeMotion() {
}
-void HGlobalValueNumberer::ProcessLoopBlock(
+void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
GVNFlagSet loop_kills,
@@ -601,20 +598,21 @@ void HGlobalValueNumberer::ProcessLoopBlock(
}
-bool HGlobalValueNumberer::AllowCodeMotion() {
+bool HGlobalValueNumberingPhase::AllowCodeMotion() {
return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
}
-bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
- HBasicBlock* loop_header) {
+bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
+ HBasicBlock* loop_header) {
// If we've disabled code motion or we're in a block that unconditionally
// deoptimizes, don't move any instructions.
return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
}
-GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+GVNFlagSet
+HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
GVNFlagSet side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
@@ -754,8 +752,8 @@ class GvnBasicBlockState: public ZoneObject {
// into a loop to avoid stack overflows.
// The logical "stack frames" of the recursion are kept in a list of
// GvnBasicBlockState instances.
-void HGlobalValueNumberer::AnalyzeGraph() {
- HBasicBlock* entry_block = graph_->entry_block();
+void HGlobalValueNumberingPhase::AnalyzeGraph() {
+ HBasicBlock* entry_block = graph()->entry_block();
HValueMap* entry_map = new(zone()) HValueMap(zone());
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
@@ -826,7 +824,8 @@ void HGlobalValueNumberer::AnalyzeGraph() {
HBasicBlock* dominator_block;
GvnBasicBlockState* next =
- current->next_in_dominator_tree_traversal(zone(), &dominator_block);
+ current->next_in_dominator_tree_traversal(zone(),
+ &dominator_block);
if (next != NULL) {
HBasicBlock* dominated = next->block();
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index c39765a1ee..66224e4338 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -76,14 +76,24 @@ class SparseSet {
};
-class HGlobalValueNumberer BASE_EMBEDDED {
+// Perform common subexpression elimination and loop-invariant code motion.
+class HGlobalValueNumberingPhase : public HPhase {
public:
- HGlobalValueNumberer(HGraph* graph, CompilationInfo* info);
-
- // Returns true if values with side effects are removed.
- bool Analyze();
+ explicit HGlobalValueNumberingPhase(HGraph* graph);
+
+ void Run() {
+ Analyze();
+ // Trigger a second analysis pass to further eliminate duplicate values
+ // that could only be discovered by removing side-effect-generating
+ // instructions during the first pass.
+ if (FLAG_smi_only_arrays && removed_side_effects_) {
+ Analyze();
+ ASSERT(!removed_side_effects_);
+ }
+ }
private:
+ void Analyze();
GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
@@ -98,12 +108,6 @@ class HGlobalValueNumberer BASE_EMBEDDED {
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
- HGraph* graph() { return graph_; }
- CompilationInfo* info() { return info_; }
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- CompilationInfo* info_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
@@ -115,6 +119,8 @@ class HGlobalValueNumberer BASE_EMBEDDED {
// Used when collecting side effects on paths from dominator to
// dominated.
SparseSet visited_on_paths_;
+
+ DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
diff --git a/deps/v8/src/hydrogen-infer-representation.cc b/deps/v8/src/hydrogen-infer-representation.cc
new file mode 100644
index 0000000000..95c341285c
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-representation.cc
@@ -0,0 +1,172 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-infer-representation.h"
+
+namespace v8 {
+namespace internal {
+
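+// Queues a value for (re)processing unless its representation is already
+// tagged, it is not flexible, or it is queued already.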
+void HInferRepresentationPhase::AddToWorklist(HValue* current) {
+ if (current->representation().IsTagged()) return;
+ if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
+ if (in_worklist_.Contains(current->id())) return;
+ worklist_.Add(current, zone());
+ in_worklist_.Add(current->id());
+}
+
+
+void HInferRepresentationPhase::Run() {
+ // (1) Initialize bit vectors and count real uses. Each phi gets a
+ // bit-vector of length <number of phis>.
+ const ZoneList<HPhi*>* phi_list = graph()->phi_list();
+ int phi_count = phi_list->length();
+ ZoneList<BitVector*> connected_phis(phi_count, zone());
+ for (int i = 0; i < phi_count; ++i) {
+ phi_list->at(i)->InitRealUses(i);
+ BitVector* connected_set = new(zone()) BitVector(phi_count, zone());
+ connected_set->Add(i);
+ connected_phis.Add(connected_set, zone());
+ }
+
+ // (2) Do a fixed point iteration to find the set of connected phis. A
+ // phi is connected to another phi if its value is used either directly or
+ // indirectly through a transitive closure of the def-use relation.
+ bool change = true;
+ while (change) {
+ change = false;
+ // We normally have far more "forward edges" than "backward edges",
+ // so we terminate faster when we walk backwards.
+ for (int i = phi_count - 1; i >= 0; --i) {
+ HPhi* phi = phi_list->at(i);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (use->IsPhi()) {
+ int id = HPhi::cast(use)->phi_id();
+ if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
+ change = true;
+ }
+ }
+ }
+ }
+
+ // Set truncation flags for groups of connected phis. This is a conservative
+ // approximation; the flag will be properly re-computed after representations
+ // have been determined.
+ if (phi_count > 0) {
+ BitVector done(phi_count, zone());
+ for (int i = 0; i < phi_count; ++i) {
+ if (done.Contains(i)) continue;
+
+ // Check if all uses of all connected phis in this group are truncating.
+ bool all_uses_everywhere_truncating = true;
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ all_uses_everywhere_truncating &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+ done.Add(index);
+ }
+ if (all_uses_everywhere_truncating) {
+ continue; // Great, nothing to do.
+ }
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ }
+ }
+ }
+
+ // Simplify constant phi inputs where possible.
+ // This step uses kTruncatingToInt32 flags of phis.
+ for (int i = 0; i < phi_count; ++i) {
+ phi_list->at(i)->SimplifyConstantInputs();
+ }
+
+ // Use the phi reachability information from step 2 to
+ // sum up the non-phi use counts of all connected phis.
+ for (int i = 0; i < phi_count; ++i) {
+ HPhi* phi = phi_list->at(i);
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ HPhi* it_use = phi_list->at(index);
+ if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
+ }
+ }
+
+  // Initialize the worklist.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ AddToWorklist(phis->at(j));
+ }
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ AddToWorklist(current);
+ }
+ }
+
+  // Do a fixed point iteration, trying to improve representations.
+ while (!worklist_.is_empty()) {
+ HValue* current = worklist_.RemoveLast();
+ in_worklist_.Remove(current->id());
+ current->InferRepresentation(this);
+ }
+
+ // Lastly: any instruction that we don't have representation information
+ // for defaults to Tagged.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ HPhi* phi = phis->at(j);
+ if (phi->representation().IsNone()) {
+ phi->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->representation().IsNone() &&
+ current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+ if (current->CheckFlag(HInstruction::kCannotBeTagged)) {
+ current->ChangeRepresentation(Representation::Double());
+ } else {
+ current->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-infer-representation.h b/deps/v8/src/hydrogen-infer-representation.h
new file mode 100644
index 0000000000..7c605696c4
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-representation.h
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_
+#define V8_HYDROGEN_INFER_REPRESENTATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
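+// Phase that infers a representation for every flexible value in the graph
+// via a worklist-based fixed point iteration over inputs and uses; values
+// still without a representation afterwards default to tagged (or double
+// when they cannot be tagged).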
+class HInferRepresentationPhase : public HPhase {
+ public:
+ explicit HInferRepresentationPhase(HGraph* graph)
+ : HPhase("H_Infer representations", graph),
+ worklist_(8, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()) { }
+
+ void Run();
+ void AddToWorklist(HValue* current);
+
+ private:
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_INFER_REPRESENTATION_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index b36706b49b..932fd47af1 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -29,7 +29,7 @@
#include "double.h"
#include "factory.h"
-#include "hydrogen.h"
+#include "hydrogen-infer-representation.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
@@ -78,12 +78,16 @@ void HValue::AssumeRepresentation(Representation r) {
}
-void HValue::InferRepresentation(HInferRepresentation* h_infer) {
+void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
+ new_rep = RepresentationFromUseRequirements();
+ if (new_rep.fits_into(Representation::Integer32())) {
+ UpdateRepresentation(new_rep, h_infer, "use requirements");
+ }
}
@@ -120,10 +124,11 @@ Representation HValue::RepresentationFromUses() {
void HValue::UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
+ HInferRepresentationPhase* h_infer,
const char* reason) {
Representation r = representation();
if (new_rep.is_more_general_than(r)) {
+ if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return;
if (FLAG_trace_representation) {
PrintF("Changing #%d %s representation %s -> %s based on %s\n",
id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
@@ -134,7 +139,7 @@ void HValue::UpdateRepresentation(Representation new_rep,
}
-void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
+void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
h_infer->AddToWorklist(it.value());
}
@@ -1145,21 +1150,20 @@ void HBoundsCheck::PrintDataTo(StringStream* stream) {
}
-void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) {
+void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation r;
HValue* actual_index = index()->ActualValue();
HValue* actual_length = length()->ActualValue();
Representation index_rep = actual_index->representation();
- if (!actual_length->representation().IsSmiOrTagged()) {
- r = Representation::Integer32();
- } else if ((index_rep.IsTagged() && actual_index->type().IsSmi()) ||
- index_rep.IsSmi()) {
- // If the index is smi, allow the length to be smi, since it is usually
- // already smi from loading it out of the length field of a JSArray. This
- // allows for direct comparison without untagging.
- r = Representation::Smi();
- } else {
+ Representation length_rep = actual_length->representation();
+ if (index_rep.IsTagged() && actual_index->type().IsSmi()) {
+ index_rep = Representation::Smi();
+ }
+ if (length_rep.IsTagged() && actual_length->type().IsSmi()) {
+ length_rep = Representation::Smi();
+ }
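+  // Pick the more general of the two representations, but never anything
+  // more general than Integer32.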
+ Representation r = index_rep.generalize(length_rep);
+ if (r.is_more_general_than(Representation::Integer32())) {
r = Representation::Integer32();
}
UpdateRepresentation(r, h_infer, "boundscheck");
@@ -1221,6 +1225,13 @@ void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
}
+void HCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add(ElementsKindToString(elements_kind()));
+ stream->Add(" ");
+ HBinaryCall::PrintDataTo(stream);
+}
+
+
void HCallRuntime::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
stream->Add("#%d", argument_count());
@@ -1277,20 +1288,26 @@ void HReturn::PrintDataTo(StringStream* stream) {
Representation HBranch::observed_input_representation(int index) {
static const ToBooleanStub::Types tagged_types(
- ToBooleanStub::UNDEFINED |
ToBooleanStub::NULL_TYPE |
ToBooleanStub::SPEC_OBJECT |
ToBooleanStub::STRING |
ToBooleanStub::SYMBOL);
if (expected_input_types_.ContainsAnyOf(tagged_types)) {
return Representation::Tagged();
- } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ }
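+  // Undefined can only be handled as NaN in a double representation when
+  // heap numbers are expected as well; otherwise the input stays tagged.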
+ if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ return Representation::Double();
+ }
+ return Representation::Tagged();
+ }
+ if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
return Representation::Double();
- } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
- return Representation::Integer32();
- } else {
- return Representation::None();
}
+ if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ return Representation::Smi();
+ }
+ return Representation::None();
}
@@ -1511,30 +1528,52 @@ void HChange::PrintDataTo(StringStream* stream) {
}
+static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ }
+ // A change from an integer32 can be replaced by the integer32 value.
+ if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
+ return NULL;
+}
+
+
HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
- // If the input is integer32 then we replace the floor instruction
- // with its input. This happens before the representation changes are
- // introduced.
-
- // TODO(2205): The above comment is lying. All of this happens
- // *after* representation changes are introduced. We should check
- // for value->IsChange() and react accordingly if yes.
+ HValue* val = value();
+ if (val->IsChange()) val = HChange::cast(val)->value();
- if (value()->representation().IsInteger32()) return value();
+ // If the input is integer32 then we replace the floor instruction
+ // with its input.
+ if (val->representation().IsInteger32()) return val;
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_X64)
- if (value()->IsDiv() && (value()->UseCount() == 1)) {
- // TODO(2038): Implement this optimization for non ARM architectures.
- HDiv* hdiv = HDiv::cast(value());
+ if (val->IsDiv() && (val->UseCount() == 1)) {
+ HDiv* hdiv = HDiv::cast(val);
HValue* left = hdiv->left();
HValue* right = hdiv->right();
// Try to simplify left and right values of the division.
- HValue* new_left =
- LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
+ HValue* new_left = SimplifiedDividendForMathFloorOfDiv(left);
+ if (new_left == NULL &&
+ hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
+ new_left = new(block()->zone())
+ HChange(left, Representation::Integer32(), false, false);
+ HChange::cast(new_left)->InsertBefore(this);
+ }
HValue* new_right =
- LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+ LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+ if (new_right == NULL &&
+#if V8_TARGET_ARCH_ARM
+ CpuFeatures::IsSupported(SUDIV) &&
+#endif
+ hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
+ new_right = new(block()->zone())
+ HChange(right, Representation::Integer32(), false, false);
+ HChange::cast(new_right)->InsertBefore(this);
+ }
// Return if left or right are not optimizable.
if ((new_left == NULL) || (new_right == NULL)) return this;
@@ -1548,26 +1587,20 @@ HValue* HUnaryMathOperation::Canonicalize() {
!HInstruction::cast(new_right)->IsLinked()) {
HInstruction::cast(new_right)->InsertBefore(this);
}
- HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
- new_left,
- new_right);
+ HMathFloorOfDiv* instr = new(block()->zone())
+ HMathFloorOfDiv(context(), new_left, new_right);
// Replace this HMathFloor instruction by the new HMathFloorOfDiv.
instr->InsertBefore(this);
ReplaceAllUsesWith(instr);
Kill();
// We know the division had no other uses than this HMathFloor. Delete it.
- // Also delete the arguments of the division if they are not used any
- // more.
+ // Dead code elimination will deal with |left| and |right| if
+ // appropriate.
hdiv->DeleteAndReplaceWith(NULL);
- ASSERT(left->IsChange() || left->IsConstant());
- ASSERT(right->IsChange() || right->IsConstant());
- if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
- if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);
// Return NULL to remove this instruction from the graph.
return NULL;
}
-#endif // V8_TARGET_ARCH_ARM
}
return this;
}
@@ -1739,9 +1772,12 @@ Range* HConstant::InferRange(Zone* zone) {
Range* HPhi::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
if (block()->IsLoopHeader()) {
- Range* range = new(zone) Range(kMinInt, kMaxInt);
+ Range* range = r.IsSmi()
+ ? new(zone) Range(Smi::kMinValue, Smi::kMaxValue)
+ : new(zone) Range(kMinInt, kMaxInt);
return range;
} else {
Range* range = OperandAt(0)->range()->Copy(zone);
@@ -2295,7 +2331,7 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
-void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
+void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -2304,6 +2340,10 @@ void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
if (!observed_output_representation_.IsNone()) return;
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
+ new_rep = RepresentationFromUseRequirements();
+ if (new_rep.fits_into(Representation::Integer32())) {
+ UpdateRepresentation(new_rep, h_infer, "use requirements");
+ }
}
@@ -2354,7 +2394,7 @@ void HBinaryOperation::AssumeRepresentation(Representation r) {
}
-void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
+void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -2533,7 +2573,8 @@ void HGoto::PrintDataTo(StringStream* stream) {
}
-void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
+void HCompareIDAndBranch::InferRepresentation(
+ HInferRepresentationPhase* h_infer) {
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
Representation observed_left = observed_input_representation(0);
@@ -3032,9 +3073,8 @@ HType HCheckFunction::CalculateInferredType() {
}
-HType HCheckNonSmi::CalculateInferredType() {
- // TODO(kasperl): Is there any way to signal that this isn't a smi?
- return HType::Tagged();
+HType HCheckHeapObject::CalculateInferredType() {
+ return HType::NonPrimitive();
}
@@ -3124,6 +3164,11 @@ HType HStringCharFromCode::CalculateInferredType() {
}
+HType HAllocateObject::CalculateInferredType() {
+ return HType::JSObject();
+}
+
+
HType HAllocate::CalculateInferredType() {
return type_;
}
@@ -3304,10 +3349,9 @@ HInstruction* HStringAdd::New(
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
if (c_left->HasStringValue() && c_right->HasStringValue()) {
- Factory* factory = Isolate::Current()->factory();
- return new(zone) HConstant(factory->NewConsString(c_left->StringValue(),
- c_right->StringValue()),
- Representation::Tagged());
+ Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+ c_left->StringValue(), c_right->StringValue());
+ return new(zone) HConstant(concat, Representation::Tagged());
}
}
return new(zone) HStringAdd(context, left, right);
@@ -3464,8 +3508,7 @@ HInstruction* HMod::New(Zone* zone,
HValue* context,
HValue* left,
HValue* right,
- bool has_fixed_right_arg,
- int fixed_right_arg_value) {
+ Maybe<int> fixed_right_arg) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -3484,11 +3527,7 @@ HInstruction* HMod::New(Zone* zone,
}
}
}
- return new(zone) HMod(context,
- left,
- right,
- has_fixed_right_arg,
- fixed_right_arg_value);
+ return new(zone) HMod(context, left, right, fixed_right_arg);
}
@@ -3640,7 +3679,7 @@ void HPhi::SimplifyConstantInputs() {
}
-void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
+void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -3660,34 +3699,26 @@ Representation HPhi::RepresentationFromInputs() {
}
-Representation HPhi::RepresentationFromUseRequirements() {
- Representation all_uses_require = Representation::None();
- bool all_uses_require_the_same = true;
+// Returns a representation if all uses agree on the same representation.
+// Integer32 is also returned when some uses are Smi but others are Integer32.
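+// For example, a value used once as Smi and once as Integer32 yields
+// Integer32, whereas a Smi use combined with a Double use yields None.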
+Representation HValue::RepresentationFromUseRequirements() {
+ Representation rep = Representation::None();
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
// We check for observed_input_representation elsewhere.
Representation use_rep =
it.value()->RequiredInputRepresentation(it.index());
- // No useful info from this use -> look at the next one.
- if (use_rep.IsNone()) {
+ if (rep.IsNone()) {
+ rep = use_rep;
continue;
}
- if (use_rep.Equals(all_uses_require)) {
+ if (use_rep.IsNone() || rep.Equals(use_rep)) continue;
+ if (rep.generalize(use_rep).IsInteger32()) {
+ rep = Representation::Integer32();
continue;
}
- // This use's representation contradicts what we've seen so far.
- if (!all_uses_require.IsNone()) {
- ASSERT(!use_rep.Equals(all_uses_require));
- all_uses_require_the_same = false;
- break;
- }
- // Otherwise, initialize observed representation.
- all_uses_require = use_rep;
- }
- if (all_uses_require_the_same) {
- return all_uses_require;
+ return Representation::None();
}
-
- return Representation::None();
+ return rep;
}
@@ -3712,7 +3743,7 @@ void HSimulate::Verify() {
}
-void HCheckNonSmi::Verify() {
+void HCheckHeapObject::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 82ed261eb9..26bda87caa 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -45,7 +45,7 @@ namespace internal {
// Forward declarations.
class HBasicBlock;
class HEnvironment;
-class HInferRepresentation;
+class HInferRepresentationPhase;
class HInstruction;
class HLoopInformation;
class HValue;
@@ -66,6 +66,7 @@ class LChunkBuilder;
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
+ V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -88,9 +89,9 @@ class LChunkBuilder;
V(CallStub) \
V(Change) \
V(CheckFunction) \
+ V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
- V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
@@ -110,7 +111,6 @@ class LChunkBuilder;
V(ElementsKind) \
V(EnterInlined) \
V(EnvironmentMarker) \
- V(FixedArrayBaseLength) \
V(ForceRepresentation) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -433,7 +433,7 @@ class HType {
bool IsHeapObject() const {
ASSERT(type_ != kUninitialized);
- return IsHeapNumber() || IsString() || IsNonPrimitive();
+ return IsHeapNumber() || IsString() || IsBoolean() || IsNonPrimitive();
}
static HType TypeFromValue(Handle<Object> value);
@@ -783,6 +783,7 @@ class HValue: public ZoneObject {
enum Flag {
kFlexibleRepresentation,
+ kCannotBeTagged,
// Participate in Global Value Numbering, i.e. elimination of
// unnecessary recomputations. If an instruction sets this flag, it must
// implement DataEquals(), which will be used to determine if other
@@ -888,6 +889,7 @@ class HValue: public ZoneObject {
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
ASSERT(CheckFlag(kFlexibleRepresentation));
+ ASSERT(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
RepresentationChanged(r);
representation_ = r;
if (r.IsTagged()) {
@@ -915,6 +917,10 @@ class HValue: public ZoneObject {
type_ = new_type;
}
+ bool IsHeapObject() {
+ return representation_.IsHeapObject() || type_.IsHeapObject();
+ }
+
// An operation needs to override this function iff:
// 1) it can produce an int32 output.
// 2) the true value of its output can potentially be minus zero.
@@ -1055,12 +1061,15 @@ class HValue: public ZoneObject {
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
+ // Escape analysis helpers.
+ virtual bool HasEscapingOperandAt(int index) { return true; }
+
// Representation helpers.
virtual Representation observed_input_representation(int index) {
return Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) = 0;
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
@@ -1150,10 +1159,11 @@ class HValue: public ZoneObject {
return representation();
}
Representation RepresentationFromUses();
+ Representation RepresentationFromUseRequirements();
virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
+ HInferRepresentationPhase* h_infer,
const char* reason);
- void AddDependantsToWorklist(HInferRepresentation* h_infer);
+ void AddDependantsToWorklist(HInferRepresentationPhase* h_infer);
virtual void RepresentationChanged(Representation to) { }
@@ -1425,6 +1435,7 @@ class HDummyUse: public HTemplateInstruction<1> {
HValue* value() { return OperandAt(0); }
+ virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1595,17 +1606,21 @@ class HBranch: public HUnaryControlInstruction {
HBranch(HValue* value,
HBasicBlock* true_target,
HBasicBlock* false_target,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::no_types())
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::Types())
: HUnaryControlInstruction(value, true_target, false_target),
expected_input_types_(expected_input_types) {
ASSERT(true_target != NULL && false_target != NULL);
+ SetFlag(kAllowUndefinedAsNaN);
}
explicit HBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ : HUnaryControlInstruction(value, NULL, NULL) {
+ SetFlag(kAllowUndefinedAsNaN);
+ }
HBranch(HValue* value, ToBooleanStub::Types expected_input_types)
: HUnaryControlInstruction(value, NULL, NULL),
- expected_input_types_(expected_input_types) { }
-
+ expected_input_types_(expected_input_types) {
+ SetFlag(kAllowUndefinedAsNaN);
+ }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1880,6 +1895,7 @@ class HSimulate: public HInstruction {
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) const { return values_[index]; }
+ virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -2016,6 +2032,9 @@ enum InliningKind {
};
+class HArgumentsObject;
+
+
class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure,
@@ -2023,7 +2042,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function,
InliningKind inlining_kind,
Variable* arguments_var,
- ZoneList<HValue*>* arguments_values,
+ HArgumentsObject* arguments_object,
bool undefined_receiver,
Zone* zone)
: closure_(closure),
@@ -2032,7 +2051,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
function_(function),
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
- arguments_values_(arguments_values),
+ arguments_object_(arguments_object),
undefined_receiver_(undefined_receiver),
return_targets_(2, zone) {
}
@@ -2055,7 +2074,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
}
Variable* arguments_var() { return arguments_var_; }
- ZoneList<HValue*>* arguments_values() { return arguments_values_; }
+ HArgumentsObject* arguments_object() { return arguments_object_; }
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
@@ -2066,7 +2085,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function_;
InliningKind inlining_kind_;
Variable* arguments_var_;
- ZoneList<HValue*>* arguments_values_;
+ HArgumentsObject* arguments_object_;
bool undefined_receiver_;
ZoneList<HBasicBlock*> return_targets_;
};
@@ -2473,14 +2492,14 @@ class HCallNew: public HBinaryCall {
class HCallNewArray: public HCallNew {
public:
HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- Handle<JSGlobalPropertyCell> type_cell)
+ Handle<Cell> type_cell, ElementsKind elements_kind)
: HCallNew(context, constructor, argument_count),
- type_cell_(type_cell) {
- elements_kind_ = static_cast<ElementsKind>(
- Smi::cast(type_cell->value())->value());
- }
+ elements_kind_(elements_kind),
+ type_cell_(type_cell) {}
- Handle<JSGlobalPropertyCell> property_cell() const {
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<Cell> property_cell() const {
return type_cell_;
}
@@ -2490,7 +2509,7 @@ class HCallNewArray: public HCallNew {
private:
ElementsKind elements_kind_;
- Handle<JSGlobalPropertyCell> type_cell_;
+ Handle<Cell> type_cell_;
};
@@ -2522,29 +2541,6 @@ class HCallRuntime: public HCall<1> {
};
-class HFixedArrayBaseLength: public HUnaryOperation {
- public:
- explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
- set_type(HType::Smi());
- set_representation(Representation::Smi());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnArrayLengths);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
class HMapEnumLength: public HUnaryOperation {
public:
explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
@@ -2786,6 +2782,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
return check_map;
}
+ virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2937,9 +2934,9 @@ class HCheckInstanceType: public HUnaryOperation {
};
-class HCheckNonSmi: public HUnaryOperation {
+class HCheckHeapObject: public HUnaryOperation {
public:
- explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
+ explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -2956,17 +2953,13 @@ class HCheckNonSmi: public HUnaryOperation {
virtual HValue* Canonicalize() {
HType value_type = value()->type();
- if (!value_type.IsUninitialized() &&
- (value_type.IsHeapNumber() ||
- value_type.IsString() ||
- value_type.IsBoolean() ||
- value_type.IsNonPrimitive())) {
+ if (!value_type.IsUninitialized() && value_type.IsHeapObject()) {
return NULL;
}
return this;
}
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
+ DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2977,21 +2970,33 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
public:
HCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder,
- Zone* zone)
+ Zone* zone,
+ CompilationInfo* info)
: prototypes_(2, zone),
maps_(2, zone),
first_prototype_unique_id_(),
- last_prototype_unique_id_() {
+ last_prototype_unique_id_(),
+ can_omit_prototype_maps_(true) {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
// Keep a list of all objects on the prototype chain up to the holder
// and the expected maps.
while (true) {
prototypes_.Add(prototype, zone);
- maps_.Add(Handle<Map>(prototype->map()), zone);
+ Handle<Map> map(prototype->map());
+ maps_.Add(map, zone);
+ can_omit_prototype_maps_ &= map->CanOmitPrototypeChecks();
if (prototype.is_identical_to(holder)) break;
prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
}
+ if (can_omit_prototype_maps_) {
+ // Mark in-flight compilation as dependent on those maps.
+ for (int i = 0; i < maps()->length(); i++) {
+ Handle<Map> map = maps()->at(i);
+ map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
+ info);
+ }
+ }
}
ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
@@ -3016,12 +3021,7 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
}
- bool CanOmitPrototypeChecks() {
- for (int i = 0; i < maps()->length(); i++) {
- if (!maps()->at(i)->CanOmitPrototypeChecks()) return false;
- }
- return true;
- }
+ bool CanOmitPrototypeChecks() { return can_omit_prototype_maps_; }
protected:
virtual bool DataEquals(HValue* other) {
@@ -3035,6 +3035,7 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
ZoneList<Handle<Map> > maps_;
UniqueValueId first_prototype_unique_id_;
UniqueValueId last_prototype_unique_id_;
+ bool can_omit_prototype_maps_;
};
@@ -3056,8 +3057,7 @@ class HPhi: public HValue {
virtual Representation RepresentationFromInputs();
virtual Range* InferRange(Zone* zone);
- virtual void InferRepresentation(HInferRepresentation* h_infer);
- Representation RepresentationFromUseRequirements();
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
@@ -3194,25 +3194,44 @@ class HInductionVariableAnnotation : public HUnaryOperation {
class HArgumentsObject: public HTemplateInstruction<0> {
public:
- HArgumentsObject() {
+ HArgumentsObject(int count, Zone* zone) : values_(count, zone) {
set_representation(Representation::Tagged());
SetFlag(kIsArguments);
}
+ const ZoneList<HValue*>* arguments_values() const { return &values_; }
+ int arguments_count() const { return values_.length(); }
+
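+  // The argument values are kept as real operands of this instruction so
+  // that they participate in the usual def-use tracking.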
+ void AddArgument(HValue* argument, Zone* zone) {
+ values_.Add(NULL, zone); // Resize list.
+ SetOperandAt(values_.length() - 1, argument);
+ }
+
+ virtual int OperandCount() { return values_.length(); }
+ virtual HValue* OperandAt(int index) const { return values_[index]; }
+
+ virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value) {
+ values_[index] = value;
+ }
+
private:
virtual bool IsDeletable() const { return true; }
+
+ ZoneList<HValue*> values_;
};
class HConstant: public HTemplateInstruction<0> {
public:
- HConstant(Handle<Object> handle, Representation r);
+ HConstant(Handle<Object> handle, Representation r = Representation::None());
HConstant(int32_t value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
@@ -3461,12 +3480,12 @@ class HBinaryOperation: public HTemplateInstruction<3> {
return observed_input_representation_[index - 1];
}
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
virtual Representation RepresentationFromInputs();
virtual void AssumeRepresentation(Representation r);
virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
+ HInferRepresentationPhase* h_infer,
const char* reason) {
// By default, binary operations don't handle Smis.
if (new_rep.IsSmi()) {
@@ -3678,7 +3697,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
int scale = 0);
virtual void PrintDataTo(StringStream* stream);
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
@@ -3775,7 +3794,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
}
virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
+ HInferRepresentationPhase* h_infer,
const char* reason) {
// We only generate either int32 or generic tagged bitwise operations.
if (new_rep.IsSmi() || new_rep.IsDouble()) {
@@ -3910,7 +3929,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
observed_input_representation_[1] = right;
}
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
virtual Representation RequiredInputRepresentation(int index) {
return representation();
@@ -4432,11 +4451,9 @@ class HMod: public HArithmeticBinaryOperation {
HValue* context,
HValue* left,
HValue* right,
- bool has_fixed_right_arg,
- int fixed_right_arg_value);
+ Maybe<int> fixed_right_arg);
- bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
- int fixed_right_arg_value() const { return fixed_right_arg_value_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
bool HasPowerOf2Divisor() {
if (right()->IsConstant() &&
@@ -4463,17 +4480,14 @@ class HMod: public HArithmeticBinaryOperation {
HMod(HValue* context,
HValue* left,
HValue* right,
- bool has_fixed_right_arg,
- int fixed_right_arg_value)
+ Maybe<int> fixed_right_arg)
: HArithmeticBinaryOperation(context, left, right),
- has_fixed_right_arg_(has_fixed_right_arg),
- fixed_right_arg_value_(fixed_right_arg_value) {
+ fixed_right_arg_(fixed_right_arg) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
- const bool has_fixed_right_arg_;
- const int fixed_right_arg_value_;
+ const Maybe<int> fixed_right_arg_;
};
@@ -4532,7 +4546,7 @@ class HMathMinMax: public HArithmeticBinaryOperation {
return RequiredInputRepresentation(index);
}
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
virtual Representation RepresentationFromInputs() {
Representation left_rep = left()->representation();
@@ -4824,14 +4838,14 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
class HLoadGlobalCell: public HTemplateInstruction<0> {
public:
- HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
+ HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
: cell_(cell), details_(details), unique_id_() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnGlobalVars);
}
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+ Handle<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
virtual void PrintDataTo(StringStream* stream);
@@ -4859,7 +4873,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
private:
virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
- Handle<JSGlobalPropertyCell> cell_;
+ Handle<Cell> cell_;
PropertyDetails details_;
UniqueValueId unique_id_;
};
@@ -4898,6 +4912,48 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
+class HAllocateObject: public HTemplateInstruction<1> {
+ public:
+ HAllocateObject(HValue* context, Handle<JSFunction> constructor)
+ : constructor_(constructor) {
+ SetOperandAt(0, context);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ constructor_initial_map_ = constructor->has_initial_map()
+ ? Handle<Map>(constructor->initial_map())
+ : Handle<Map>::null();
+ // If slack tracking finished, the instance size and property counts
+ // remain unchanged so that we can allocate memory for the object.
+ ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+ }
+
+ // Maximum instance size for which allocations will be inlined.
+ static const int kMaxSize = 64 * kPointerSize;
+
+ HValue* context() { return OperandAt(0); }
+ Handle<JSFunction> constructor() { return constructor_; }
+ Handle<Map> constructor_initial_map() { return constructor_initial_map_; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+ virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ ASSERT(!constructor_initial_map_.is_null());
+ return constructor_initial_map_;
+ }
+ virtual HType CalculateInferredType();
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
+
+ private:
+ // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+ // virtual bool IsDeletable() const { return true; }
+
+ Handle<JSFunction> constructor_;
+ Handle<Map> constructor_initial_map_;
+};
+
+
class HAllocate: public HTemplateInstruction<2> {
public:
enum Flags {
@@ -4916,9 +4972,6 @@ class HAllocate: public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
}
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxInlineSize = 64 * kPointerSize;
-
static Flags DefaultFlags() {
return CAN_ALLOCATE_IN_NEW_SPACE;
}
@@ -4943,14 +4996,6 @@ class HAllocate: public HTemplateInstruction<2> {
}
}
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
- return known_initial_map_;
- }
-
- void set_known_initial_map(Handle<Map> known_initial_map) {
- known_initial_map_ = known_initial_map;
- }
-
virtual HType CalculateInferredType();
bool CanAllocateInNewSpace() const {
@@ -4985,7 +5030,6 @@ class HAllocate: public HTemplateInstruction<2> {
private:
HType type_;
Flags flags_;
- Handle<Map> known_initial_map_;
};
@@ -5029,6 +5073,7 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
new_space_dominator);
}
if (object != new_space_dominator) return true;
+ if (object->IsAllocateObject()) return false;
if (object->IsAllocate()) {
return !HAllocate::cast(object)->GuaranteedInNewSpace();
}
@@ -5039,7 +5084,7 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
class HStoreGlobalCell: public HUnaryOperation {
public:
HStoreGlobalCell(HValue* value,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
PropertyDetails details)
: HUnaryOperation(value),
cell_(cell),
@@ -5047,7 +5092,7 @@ class HStoreGlobalCell: public HUnaryOperation {
SetGVNFlag(kChangesGlobalVars);
}
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+ Handle<PropertyCell> cell() const { return cell_; }
bool RequiresHoleCheck() {
return !details_.IsDontDelete() || details_.IsReadOnly();
}
@@ -5063,7 +5108,7 @@ class HStoreGlobalCell: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
private:
- Handle<JSGlobalPropertyCell> cell_;
+ Handle<PropertyCell> cell_;
PropertyDetails details_;
};
@@ -5369,6 +5414,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
HObjectAccess access() const { return access_; }
Representation field_representation() const { return representation_; }
+ virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -5691,6 +5737,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
= Representation::Tagged())
: access_(access),
field_representation_(field_representation),
+ transition_(),
transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
@@ -5700,6 +5747,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
+ virtual bool HasEscapingOperandAt(int index) { return index == 1; }
virtual Representation RequiredInputRepresentation(int index) {
if (FLAG_track_double_fields &&
index == 1 && field_representation_.IsDouble()) {
@@ -5722,7 +5770,13 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HObjectAccess access() const { return access_; }
Handle<Map> transition() const { return transition_; }
UniqueValueId transition_unique_id() const { return transition_unique_id_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
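+  // Sets the transition map; if that map can be deprecated, the in-flight
+  // compilation is registered as dependent on it.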
+ void SetTransition(Handle<Map> map, CompilationInfo* info) {
+ ASSERT(transition_.is_null()); // Only set once.
+ if (map->CanBeDeprecated()) {
+ map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
+ }
+ transition_ = map;
+ }
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
@@ -5799,6 +5853,7 @@ class HStoreKeyed
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false),
+ is_uninitialized_(false),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
@@ -5826,6 +5881,7 @@ class HStoreKeyed
}
}
+ virtual bool HasEscapingOperandAt(int index) { return index != 0; }
virtual Representation RequiredInputRepresentation(int index) {
// kind_fast: tagged[int32] = tagged
// kind_double: tagged[int32] = double
@@ -5858,6 +5914,9 @@ class HStoreKeyed
virtual Representation observed_input_representation(int index) {
if (index < 2) return RequiredInputRepresentation(index);
+ if (IsUninitialized()) {
+ return Representation::None();
+ }
if (IsFastSmiElementsKind(elements_kind())) {
return Representation::Smi();
}
@@ -5884,6 +5943,10 @@ class HStoreKeyed
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ bool IsUninitialized() { return is_uninitialized_; }
+ void SetUninitialized(bool is_uninitialized) {
+ is_uninitialized_ = is_uninitialized;
+ }
bool IsConstantHoleStore() {
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
@@ -5914,7 +5977,8 @@ class HStoreKeyed
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
- bool is_dehoisted_;
+ bool is_dehoisted_ : 1;
+ bool is_uninitialized_ : 1;
HValue* new_space_dominator_;
};
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
new file mode 100644
index 0000000000..19a1c77442
--- /dev/null
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+// True iff we are compiling for OSR and the statement is the OSR entry.
+bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
+ return statement->OsrEntryId() == builder_->current_info()->osr_ast_id();
+}
+
+
+// Build a new loop header block and set it as the current block.
+HBasicBlock* HOsrBuilder::BuildLoopEntry() {
+ HBasicBlock* loop_entry = builder_->CreateLoopHeaderBlock();
+ builder_->current_block()->Goto(loop_entry);
+ builder_->set_current_block(loop_entry);
+ return loop_entry;
+}
+
+
+HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
+ IterationStatement* statement) {
+ // Check if there is an OSR here first.
+ if (!HasOsrEntryAt(statement)) return BuildLoopEntry();
+
+ Zone* zone = builder_->zone();
+ HGraph* graph = builder_->graph();
+
+ // Only one OSR point per compile is allowed.
+ ASSERT(graph->osr() == NULL);
+
+ // Remember this builder as the one OSR builder in the graph.
+ graph->set_osr(this);
+
+ HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
+ osr_entry_ = graph->CreateBasicBlock();
+ HValue* true_value = graph->GetConstantTrue();
+ HBranch* test = new(zone) HBranch(true_value, non_osr_entry, osr_entry_);
+ builder_->current_block()->Finish(test);
+
+ HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
+ non_osr_entry->Goto(loop_predecessor);
+
+ builder_->set_current_block(osr_entry_);
+ osr_entry_->set_osr_entry();
+ BailoutId osr_entry_id = statement->OsrEntryId();
+
+ HEnvironment* environment = builder_->environment();
+ int first_expression_index = environment->first_expression_index();
+ int length = environment->length();
+ osr_values_ = new(zone) ZoneList<HUnknownOSRValue*>(length, zone);
+
+ for (int i = 0; i < first_expression_index; ++i) {
+ HUnknownOSRValue* osr_value = builder_->Add<HUnknownOSRValue>();
+ environment->Bind(i, osr_value);
+ osr_values_->Add(osr_value, zone);
+ }
+
+ if (first_expression_index != length) {
+ environment->Drop(length - first_expression_index);
+ for (int i = first_expression_index; i < length; ++i) {
+ HUnknownOSRValue* osr_value = builder_->Add<HUnknownOSRValue>();
+ environment->Push(osr_value);
+ osr_values_->Add(osr_value, zone);
+ }
+ }
+
+ builder_->AddSimulate(osr_entry_id);
+ builder_->Add<HOsrEntry>(osr_entry_id);
+ HContext* context = builder_->Add<HContext>();
+ environment->BindContext(context);
+ builder_->current_block()->Goto(loop_predecessor);
+ loop_predecessor->SetJoinId(statement->EntryId());
+ builder_->set_current_block(loop_predecessor);
+
+ // Create the final loop entry.
+ osr_loop_entry_ = BuildLoopEntry();
+ return osr_loop_entry_;
+}
+
+
+void HOsrBuilder::FinishGraph() {
+ // Do nothing for now.
+}
+
+
+void HOsrBuilder::FinishOsrValues() {
+ const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ HPhi* phi = phis->at(j);
+ osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+ }
+}
+
+} } // namespace v8::internal
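
Editorial aside: the control flow that BuildPossibleOsrLoopEntry wires up can be summarized with a small self-contained toy (plain structs, not the V8 types; Block and the block names are illustrative). The function entry branches on constant true, so the OSR arm never executes on a normal entry, yet it dominates the loop header and can therefore bind HUnknownOSRValue placeholders into the loop phis:

#include <cstdio>
#include <initializer_list>
#include <vector>

struct Block {
  const char* name;
  std::vector<Block*> succ;
  void Goto(Block* b) { succ.push_back(b); }
};

int main() {
  Block entry{"entry"}, non_osr{"non_osr_entry"}, osr{"osr_entry"},
        pred{"loop_predecessor"}, header{"loop_header"};
  // HBranch(true, non_osr_entry, osr_entry): the OSR arm is statically dead
  // on normal entry, but its values flow into the loop header's phis.
  entry.Goto(&non_osr);
  entry.Goto(&osr);
  non_osr.Goto(&pred);
  osr.Goto(&pred);   // after binding HUnknownOSRValue placeholders
  pred.Goto(&header);
  for (Block* b : {&entry, &non_osr, &osr, &pred})
    for (Block* s : b->succ) std::printf("%s -> %s\n", b->name, s->name);
}
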
diff --git a/deps/v8/src/hydrogen-osr.h b/deps/v8/src/hydrogen-osr.h
new file mode 100644
index 0000000000..0c6b65d0d4
--- /dev/null
+++ b/deps/v8/src/hydrogen-osr.h
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_OSR_H_
+#define V8_HYDROGEN_OSR_H_
+
+#include "hydrogen.h"
+#include "ast.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Responsible for building graph parts related to OSR and otherwise
+// setting up the graph to do an OSR compile.
+class HOsrBuilder : public ZoneObject {
+ public:
+ explicit HOsrBuilder(HOptimizedGraphBuilder* builder)
+ : builder_(builder),
+ osr_entry_(NULL),
+ osr_loop_entry_(NULL),
+ osr_values_(NULL) { }
+ // Creates the loop entry block for the given statement, setting up OSR
+ // entries as necessary, and sets the current block to the new block.
+ HBasicBlock* BuildPossibleOsrLoopEntry(IterationStatement* statement);
+
+ // Process the hydrogen graph after it has been completed, performing
+ // any OSR-specific cleanups or changes.
+ void FinishGraph();
+
+ // Process the OSR values and phis after initial graph optimization.
+ void FinishOsrValues();
+
+ private:
+ HBasicBlock* BuildLoopEntry();
+ bool HasOsrEntryAt(IterationStatement* statement);
+
+ HOptimizedGraphBuilder* builder_;
+ HBasicBlock* osr_entry_;
+ HBasicBlock* osr_loop_entry_;
+ ZoneList<HUnknownOSRValue*>* osr_values_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_OSR_H_
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/hydrogen-range-analysis.cc
new file mode 100644
index 0000000000..0d4d9700de
--- /dev/null
+++ b/deps/v8/src/hydrogen-range-analysis.cc
@@ -0,0 +1,169 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-range-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
+ if (FLAG_trace_range) {
+ va_list arguments;
+ va_start(arguments, msg);
+ OS::VPrint(msg, arguments);
+ va_end(arguments);
+ }
+}
+
+
+void HRangeAnalysisPhase::Analyze(HBasicBlock* block) {
+ TraceRange("Analyzing block B%d\n", block->block_id());
+
+ int last_changed_range = changed_ranges_.length() - 1;
+
+ // Infer range based on control flow.
+ if (block->predecessors()->length() == 1) {
+ HBasicBlock* pred = block->predecessors()->first();
+ if (pred->end()->IsCompareIDAndBranch()) {
+ InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
+ }
+ }
+
+ // Process phi instructions.
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ InferRange(phi);
+ }
+
+ // Go through all instructions of the current block.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ InferRange(it.Current());
+ }
+
+ // Continue analysis in all dominated blocks.
+ for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+ Analyze(block->dominated_blocks()->at(i));
+ }
+
+ RollBackTo(last_changed_range);
+}
+
+
+void HRangeAnalysisPhase::InferControlFlowRange(HCompareIDAndBranch* test,
+ HBasicBlock* dest) {
+ ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+ if (test->representation().IsSmiOrInteger32()) {
+ Token::Value op = test->token();
+ if (test->SecondSuccessor() == dest) {
+ op = Token::NegateCompareOp(op);
+ }
+ Token::Value inverted_op = Token::ReverseCompareOp(op);
+ UpdateControlFlowRange(op, test->left(), test->right());
+ UpdateControlFlowRange(inverted_op, test->right(), test->left());
+ }
+}
+
+
+// We know that value [op] other. Use this information to update the range on
+// value.
+void HRangeAnalysisPhase::UpdateControlFlowRange(Token::Value op,
+ HValue* value,
+ HValue* other) {
+ Range temp_range;
+ Range* range = other->range() != NULL ? other->range() : &temp_range;
+ Range* new_range = NULL;
+
+ TraceRange("Control flow range infer %d %s %d\n",
+ value->id(),
+ Token::Name(op),
+ other->id());
+
+ if (op == Token::EQ || op == Token::EQ_STRICT) {
+ // The same range has to apply for value.
+ new_range = range->Copy(graph()->zone());
+ } else if (op == Token::LT || op == Token::LTE) {
+ new_range = range->CopyClearLower(graph()->zone());
+ if (op == Token::LT) {
+ new_range->AddConstant(-1);
+ }
+ } else if (op == Token::GT || op == Token::GTE) {
+ new_range = range->CopyClearUpper(graph()->zone());
+ if (op == Token::GT) {
+ new_range->AddConstant(1);
+ }
+ }
+
+ if (new_range != NULL && !new_range->IsMostGeneric()) {
+ AddRange(value, new_range);
+ }
+}
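
A worked example of the narrowing rule above, restated outside V8 (a hedged sketch: Range and Narrow here are stand-ins, not the real classes). In the real code, CopyClearLower drops the lower bound and AddConstant(-1) shifts both bounds, but since the lower bound is already unbounded the net effect is upper - 1:

#include <climits>
#include <cstdio>

struct Range { int lower, upper; };

Range Narrow(char op, Range other) {   // op: '<', 'L'(<=), '>', 'G'(>=)
  switch (op) {
    case '<': return {INT_MIN, other.upper - 1};  // CopyClearLower, -1
    case 'L': return {INT_MIN, other.upper};      // CopyClearLower
    case '>': return {other.lower + 1, INT_MAX};  // CopyClearUpper, +1
    case 'G': return {other.lower, INT_MAX};      // CopyClearUpper
    default:  return other;                       // EQ: same range applies
  }
}

int main() {
  Range y = {0, 10};            // suppose y is known to lie in [0, 10]
  Range x = Narrow('<', y);     // in the branch where "x < y" held
  std::printf("x in [%d, %d]\n", x.lower, x.upper);  // x in [INT_MIN, 9]
}
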
+
+
+void HRangeAnalysisPhase::InferRange(HValue* value) {
+ ASSERT(!value->HasRange());
+ if (!value->representation().IsNone()) {
+ value->ComputeInitialRange(graph()->zone());
+ Range* range = value->range();
+ TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
+ value->id(),
+ value->Mnemonic(),
+ range->lower(),
+ range->upper());
+ }
+}
+
+
+void HRangeAnalysisPhase::RollBackTo(int index) {
+ for (int i = index + 1; i < changed_ranges_.length(); ++i) {
+ changed_ranges_[i]->RemoveLastAddedRange();
+ }
+ changed_ranges_.Rewind(index + 1);
+}
+
+
+void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
+ Range* original_range = value->range();
+ value->AddNewRange(range, graph()->zone());
+ changed_ranges_.Add(value, zone());
+ Range* new_range = value->range();
+ TraceRange("Updated range of %d set to [%d,%d]\n",
+ value->id(),
+ new_range->lower(),
+ new_range->upper());
+ if (original_range != NULL) {
+ TraceRange("Original range was [%d,%d]\n",
+ original_range->lower(),
+ original_range->upper());
+ }
+ TraceRange("New information was [%d,%d]\n",
+ range->lower(),
+ range->upper());
+}
+
+
+} } // namespace v8::internal
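
The RollBackTo mechanism deserves a note: a range inferred from control flow is only valid inside the dominated subtree where the branch condition holds, so it is recorded in changed_ranges_ and undone when the recursion returns. A self-contained sketch of that scoped-rollback pattern (illustrative names, not V8 code):

#include <cstdio>
#include <string>
#include <vector>

struct Node { std::string fact; std::vector<Node*> dominated; };
std::vector<std::string> changed;

void Analyze(Node* n) {
  size_t last = changed.size();             // remember the rollback point
  if (!n->fact.empty()) changed.push_back(n->fact);
  for (Node* d : n->dominated) Analyze(d);  // facts stay live in the subtree
  changed.resize(last);                     // RollBackTo: drop subtree facts
}

int main() {
  Node leaf{"x < 10", {}};
  Node root{"", {&leaf}};
  Analyze(&root);
  std::printf("facts after analysis: %zu\n", changed.size());  // 0
}
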
diff --git a/deps/v8/src/hydrogen-range-analysis.h b/deps/v8/src/hydrogen-range-analysis.h
new file mode 100644
index 0000000000..52ce109c87
--- /dev/null
+++ b/deps/v8/src/hydrogen-range-analysis.h
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_RANGE_ANALYSIS_H_
+#define V8_HYDROGEN_RANGE_ANALYSIS_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HRangeAnalysisPhase : public HPhase {
+ public:
+ explicit HRangeAnalysisPhase(HGraph* graph)
+ : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
+
+ void Run() {
+ Analyze(graph()->entry_block());
+ }
+
+ private:
+ void TraceRange(const char* msg, ...);
+ void Analyze(HBasicBlock* block);
+ void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
+ void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
+ void InferRange(HValue* value);
+ void RollBackTo(int index);
+ void AddRange(HValue* value, Range* range);
+
+ ZoneList<HValue*> changed_ranges_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_RANGE_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc
new file mode 100644
index 0000000000..67219f55df
--- /dev/null
+++ b/deps/v8/src/hydrogen-uint32-analysis.cc
@@ -0,0 +1,231 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-uint32-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
+ // Operations that operate on bits are safe.
+ if (use->IsBitwise() ||
+ use->IsShl() ||
+ use->IsSar() ||
+ use->IsShr() ||
+ use->IsBitNot()) {
+ return true;
+ } else if (use->IsChange() || use->IsSimulate()) {
+ // Conversions and deoptimization have special support for uint32.
+ return true;
+ } else if (use->IsStoreKeyed()) {
+ HStoreKeyed* store = HStoreKeyed::cast(use);
+ if (store->is_external()) {
+ // Storing a value into an external integer array is a bit-level
+ // operation.
+ if (store->value() == val) {
+ // Clamping or a conversion to double should have been inserted.
+ ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
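
Why the bitwise users are safe, in one small standalone example (editorial sketch, assuming the usual two's-complement reinterpretation): these operations consume only the bit pattern, which is identical whether the 32 bits are read as int32 or as uint32, so nothing is lost by the relabeling.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t u = 0xFFFFFFF0u;              // a "uint32" value above INT32_MAX
  int32_t  s = static_cast<int32_t>(u);  // same bits, read as negative int32
  std::printf("%08x\n", u & 0x0F0F0F0Fu);                         // 0f0f0f00
  std::printf("%08x\n", static_cast<uint32_t>(s) & 0x0F0F0F0Fu);  // identical
}
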
+
+
+// Iterate over all uses and verify that they are uint32-safe: they either
+// do not distinguish between int32 and uint32 due to their bitwise nature
+// or have special support for uint32 values.
+// Encountered phis are optimistically treated as safe uint32 uses,
+// marked with the kUint32 flag and collected in the phis_ list. A separate
+// pass is performed later by UnmarkUnsafePhis to clear kUint32 from phis
+// that are not actually uint32-safe (this requires fixed-point iteration).
+bool HUint32AnalysisPhase::Uint32UsesAreSafe(HValue* uint32val) {
+ bool collect_phi_uses = false;
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ if (use->IsPhi()) {
+ if (!use->CheckFlag(HInstruction::kUint32)) {
+ // There is a phi use of this value from a phi that is not yet
+ // collected in the phis_ array. A separate pass is required.
+ collect_phi_uses = true;
+ }
+
+ // Optimistically treat phis as uint32 safe.
+ continue;
+ }
+
+ if (!IsSafeUint32Use(uint32val, use)) {
+ return false;
+ }
+ }
+
+ if (collect_phi_uses) {
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ // There is a phi use of this value from a phi that is not yet
+ // collected in the phis_ array. A separate pass is required.
+ if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
+ use->SetFlag(HInstruction::kUint32);
+ phis_.Add(HPhi::cast(use), zone());
+ }
+ }
+ }
+
+ return true;
+}
+
+
+// Check if all operands of the given phi are marked with the kUint32 flag.
+bool HUint32AnalysisPhase::CheckPhiOperands(HPhi* phi) {
+ if (!phi->CheckFlag(HInstruction::kUint32)) {
+ // This phi is not uint32-safe. No need to check its operands.
+ return false;
+ }
+
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (!operand->CheckFlag(HInstruction::kUint32)) {
+ // Lazily mark constants in the uint32 range with the kUint32 flag.
+ if (operand->IsInteger32Constant() &&
+ operand->GetInteger32Constant() >= 0) {
+ operand->SetFlag(HInstruction::kUint32);
+ continue;
+ }
+
+ // This phi is not safe: some of its operands are not uint32 values.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+// Remove the kUint32 flag from the phi itself and its operands. If any
+// operand was a phi marked with kUint32, place it into a worklist for
+// transitive clearing of the kUint32 flag.
+void HUint32AnalysisPhase::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
+ phi->ClearFlag(HInstruction::kUint32);
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (operand->CheckFlag(HInstruction::kUint32)) {
+ operand->ClearFlag(HInstruction::kUint32);
+ if (operand->IsPhi()) {
+ worklist->Add(HPhi::cast(operand), zone());
+ }
+ }
+ }
+}
+
+
+void HUint32AnalysisPhase::UnmarkUnsafePhis() {
+ // No phis were collected. Nothing to do.
+ if (phis_.length() == 0) return;
+
+ // Worklist used to transitively clear kUint32 from phis that
+ // are used as arguments to other phis.
+ ZoneList<HPhi*> worklist(phis_.length(), zone());
+
+ // A phi can be used as a uint32 value if and only if
+ // all its operands are uint32 values and all its
+ // uses are uint32-safe.
+
+ // Iterate over the collected phis and unmark those that
+ // are unsafe. When unmarking a phi, unmark its operands
+ // and add any that are themselves phis to the worklist.
+ // Phis that are still marked as safe are shifted down
+ // so that all safe phis form a prefix of the phis_ array.
+ int phi_count = 0;
+ for (int i = 0; i < phis_.length(); i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
+ phis_[phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+
+ // Now the phis_ array contains only those phis that have safe
+ // non-phi uses. Start transitively clearing the kUint32 flag
+ // from the phi operands of discovered unsafe phis until
+ // only safe phis are left.
+ while (!worklist.is_empty()) {
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ UnmarkPhi(phi, &worklist);
+ }
+
+ // Check if any operands of the safe phis were unmarked,
+ // turning a safe phi into an unsafe one. The same value
+ // can flow into several phis.
+ int new_phi_count = 0;
+ for (int i = 0; i < phi_count; i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi)) {
+ phis_[new_phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+ phi_count = new_phi_count;
+ }
+}
+
+
+void HUint32AnalysisPhase::Run() {
+ if (!graph()->has_uint32_instructions()) return;
+
+ ZoneList<HInstruction*>* uint32_instructions = graph()->uint32_instructions();
+ for (int i = 0; i < uint32_instructions->length(); ++i) {
+ // Analyze the instruction and mark it with kUint32 if all
+ // its uses are uint32-safe.
+ HInstruction* current = uint32_instructions->at(i);
+ if (current->IsLinked() &&
+ current->representation().IsInteger32() &&
+ Uint32UsesAreSafe(current)) {
+ current->SetFlag(HInstruction::kUint32);
+ }
+ }
+
+ // Some phis might have been optimistically marked with the kUint32 flag.
+ // Remove this flag from those phis that are unsafe and propagate
+ // the information transitively, potentially clearing the kUint32 flag
+ // from some non-phi operations that are used as operands of unsafe phis.
+ UnmarkUnsafePhis();
+}
+
+
+} } // namespace v8::internal
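
The optimistic-then-retract phi handling above is a classic worklist fixpoint. A self-contained miniature of the retraction phase (plain structs with illustrative names, not the V8 classes): once one phi is found unsafe, the cleared flag propagates backwards through its inputs until nothing changes.

#include <cstdio>
#include <vector>

struct Phi { bool uint32; std::vector<Phi*> inputs; };

int main() {
  Phi a{true, {}};                 // optimistically marked safe
  Phi b{true, {&a}};               // later found unsafe: seed the worklist
  std::vector<Phi*> worklist{&b};
  while (!worklist.empty()) {
    Phi* p = worklist.back(); worklist.pop_back();
    p->uint32 = false;                          // UnmarkPhi
    for (Phi* in : p->inputs) {
      if (in->uint32) { in->uint32 = false; worklist.push_back(in); }
    }
  }
  std::printf("a=%d b=%d\n", a.uint32, b.uint32);  // both cleared: a=0 b=0
}
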
diff --git a/deps/v8/src/hydrogen-uint32-analysis.h b/deps/v8/src/hydrogen-uint32-analysis.h
new file mode 100644
index 0000000000..59739d1ccf
--- /dev/null
+++ b/deps/v8/src/hydrogen-uint32-analysis.h
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_UINT32_ANALYSIS_H_
+#define V8_HYDROGEN_UINT32_ANALYSIS_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Discover instructions that can be marked with the kUint32 flag, allowing
+// them to produce full-range uint32 values.
+class HUint32AnalysisPhase : public HPhase {
+ public:
+ explicit HUint32AnalysisPhase(HGraph* graph)
+ : HPhase("H_Compute safe UInt32 operations", graph), phis_(4, zone()) { }
+
+ void Run();
+
+ private:
+ INLINE(bool IsSafeUint32Use(HValue* val, HValue* use));
+ INLINE(bool Uint32UsesAreSafe(HValue* uint32val));
+ INLINE(bool CheckPhiOperands(HPhi* phi));
+ INLINE(void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist));
+ INLINE(void UnmarkUnsafePhis());
+
+ ZoneList<HPhi*> phis_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_UINT32_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index b2badcdb50..7679f93257 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "hydrogen.h"
-#include "hydrogen-gvn.h"
#include <algorithm>
@@ -35,6 +34,12 @@
#include "full-codegen.h"
#include "hashmap.h"
#include "hydrogen-environment-liveness.h"
+#include "hydrogen-escape-analysis.h"
+#include "hydrogen-infer-representation.h"
+#include "hydrogen-gvn.h"
+#include "hydrogen-osr.h"
+#include "hydrogen-range-analysis.h"
+#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
#include "scopeinfo.h"
@@ -526,7 +531,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
- Heap::RelocationLock(isolate()->heap());
+ Heap::RelocationLock relocation_lock(isolate()->heap());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
@@ -709,13 +714,9 @@ HGraphBuilder::IfBuilder::IfBuilder(
HInstruction* HGraphBuilder::IfBuilder::IfCompare(
HValue* left,
HValue* right,
- Token::Value token,
- Representation input_representation) {
+ Token::Value token) {
HCompareIDAndBranch* compare =
new(zone()) HCompareIDAndBranch(left, right, token);
- compare->set_observed_input_representation(input_representation,
- input_representation);
- compare->AssumeRepresentation(input_representation);
AddCompare(compare);
return compare;
}
@@ -807,7 +808,7 @@ void HGraphBuilder::IfBuilder::Then() {
// so that the graph builder visits it and sees any live range extending
// constructs within it.
HConstant* constant_false = builder_->graph()->GetConstantFalse();
- ToBooleanStub::Types boolean_type = ToBooleanStub::no_types();
+ ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
boolean_type.Add(ToBooleanStub::BOOLEAN);
HBranch* branch =
new(zone()) HBranch(constant_false, first_true_block_,
@@ -967,7 +968,7 @@ void HGraphBuilder::LoopBuilder::EndBody() {
HGraph* HGraphBuilder::CreateGraph() {
graph_ = new(zone()) HGraph(info_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
- HPhase phase("H_Block building", isolate());
+ CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
graph()->FinalizeUniqueValueIds();
@@ -993,18 +994,10 @@ void HGraphBuilder::AddSimulate(BailoutId id,
}
-HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index, HValue* length) {
- HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(index, length);
- AddInstruction(result);
- return result;
-}
-
-
HReturn* HGraphBuilder::AddReturn(HValue* value) {
HValue* context = environment()->LookupContext();
int num_parameters = graph()->info()->num_parameters();
- HValue* params = AddInstruction(new(graph()->zone())
- HConstant(num_parameters, Representation::Integer32()));
+ HValue* params = Add<HConstant>(num_parameters);
HReturn* return_instruction = new(graph()->zone())
HReturn(value, context, params);
current_block()->FinishExit(return_instruction);
@@ -1028,11 +1021,9 @@ HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
}
-HValue* HGraphBuilder::BuildCheckNonSmi(HValue* obj) {
+HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
if (obj->type().IsHeapObject()) return obj;
- HCheckNonSmi* check = new(zone()) HCheckNonSmi(obj);
- AddInstruction(check);
- return check;
+ return Add<HCheckHeapObject>(obj);
}
@@ -1056,7 +1047,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
ASSERT(val != NULL);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS: {
- val = AddInstruction(new(zone) HClampToUint8(val));
+ val = Add<HClampToUint8>(val);
break;
}
case EXTERNAL_BYTE_ELEMENTS:
@@ -1143,8 +1134,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
length_checker.IfCompare(length, key, Token::EQ);
length_checker.Then();
- HValue* current_capacity =
- AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ HValue* current_capacity = AddLoadFixedArrayLength(elements);
IfBuilder capacity_checker(this);
@@ -1169,7 +1159,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
if (is_js_array) {
HValue* new_length = AddInstruction(
HAdd::New(zone, context, length, graph_->GetConstant1()));
- new_length->AssumeRepresentation(Representation::Integer32());
new_length->ClearFlag(HValue::kCanOverflow);
Representation representation = IsFastElementsKind(kind)
@@ -1180,7 +1169,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
length_checker.Else();
- AddBoundsCheck(key, length);
+ Add<HBoundsCheck>(key, length);
environment()->Push(elements);
length_checker.End();
@@ -1193,7 +1182,6 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length) {
- Zone* zone = this->zone();
Heap* heap = isolate()->heap();
IfBuilder cow_checker(this);
@@ -1202,8 +1190,7 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
Handle<Map>(heap->fixed_cow_array_map()));
cow_checker.Then();
- HValue* capacity =
- AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ HValue* capacity = AddLoadFixedArrayLength(elements);
HValue* new_elements = BuildGrowElementsCapacity(object, elements,
kind, length, capacity);
@@ -1258,17 +1245,16 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (is_js_array) {
length = AddLoad(object, HObjectAccess::ForArrayLength(), mapcheck,
Representation::Smi());
- length->set_type(HType::Smi());
} else {
- length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ length = AddLoadFixedArrayLength(elements);
}
+ length->set_type(HType::Smi());
HValue* checked_key = NULL;
if (IsExternalArrayElementsKind(elements_kind)) {
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
NoObservableSideEffectsScope no_effects(this);
HLoadExternalArrayPointer* external_elements =
- new(zone) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
+ Add<HLoadExternalArrayPointer>(elements);
IfBuilder length_checker(this);
length_checker.IfCompare(key, length, Token::LT);
length_checker.Then();
@@ -1285,10 +1271,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
return result;
} else {
ASSERT(store_mode == STANDARD_STORE);
- checked_key = AddBoundsCheck(key, length);
+ checked_key = Add<HBoundsCheck>(key, length);
HLoadExternalArrayPointer* external_elements =
- new(zone) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
+ Add<HLoadExternalArrayPointer>(elements);
return AddInstruction(BuildExternalArrayElementAccess(
external_elements, checked_key, val, mapcheck,
elements_kind, is_store));
@@ -1303,8 +1288,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// deopt, leaving the backing store in an invalid state.
if (is_store && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
- val = AddInstruction(new(zone) HForceRepresentation(
- val, Representation::Smi()));
+ val = Add<HForceRepresentation>(val, Representation::Smi());
}
if (IsGrowStoreMode(store_mode)) {
@@ -1313,7 +1297,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
length, key, is_js_array);
checked_key = key;
} else {
- checked_key = AddBoundsCheck(key, length);
+ checked_key = Add<HBoundsCheck>(key, length);
if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
@@ -1342,20 +1326,14 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
int elements_size = IsFastDoubleElementsKind(kind)
? kDoubleSize : kPointerSize;
- HConstant* elements_size_value =
- new(zone) HConstant(elements_size, Representation::Integer32());
- AddInstruction(elements_size_value);
+ HConstant* elements_size_value = Add<HConstant>(elements_size);
HValue* mul = AddInstruction(
HMul::New(zone, context, capacity, elements_size_value));
- mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
- HConstant* header_size =
- new(zone) HConstant(FixedArray::kHeaderSize, Representation::Integer32());
- AddInstruction(header_size);
+ HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
HValue* total_size = AddInstruction(
HAdd::New(zone, context, mul, header_size));
- total_size->AssumeRepresentation(Representation::Integer32());
total_size->ClearFlag(HValue::kCanOverflow);
HAllocate::Flags flags = HAllocate::DefaultFlags(kind);
@@ -1371,10 +1349,7 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
}
}
- HValue* elements =
- AddInstruction(new(zone) HAllocate(context, total_size,
- HType::JSArray(), flags));
- return elements;
+ return Add<HAllocate>(context, total_size, HType::JSArray(), flags);
}
@@ -1413,10 +1388,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
AddStore(array, HObjectAccess::ForMap(), array_map);
HConstant* empty_fixed_array =
- new(zone()) HConstant(
- Handle<FixedArray>(isolate()->heap()->empty_fixed_array()),
- Representation::Tagged());
- AddInstruction(empty_fixed_array);
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
AddStore(array, access, empty_fixed_array);
@@ -1433,10 +1405,8 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
elements_location += AllocationSiteInfo::kSize;
}
- HInnerAllocatedObject* elements = new(zone()) HInnerAllocatedObject(
- array, elements_location);
- AddInstruction(elements);
-
+ HInnerAllocatedObject* elements =
+ Add<HInnerAllocatedObject>(array, elements_location);
AddStore(array, HObjectAccess::ForElementsPointer(), elements);
return elements;
}
@@ -1448,26 +1418,30 @@ HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
}
+HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
+ HLoadNamedField* instr = AddLoad(object, HObjectAccess::ForFixedArrayLength(),
+ NULL, Representation::Smi());
+ instr->set_type(HType::Smi());
+ return instr;
+}
+
+
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
HValue* old_capacity) {
Zone* zone = this->zone();
HValue* half_old_capacity =
AddInstruction(HShr::New(zone, context, old_capacity,
graph_->GetConstant1()));
- half_old_capacity->AssumeRepresentation(Representation::Integer32());
half_old_capacity->ClearFlag(HValue::kCanOverflow);
HValue* new_capacity = AddInstruction(
HAdd::New(zone, context, half_old_capacity, old_capacity));
- new_capacity->AssumeRepresentation(Representation::Integer32());
new_capacity->ClearFlag(HValue::kCanOverflow);
- HValue* min_growth =
- AddInstruction(new(zone) HConstant(16, Representation::Integer32()));
+ HValue* min_growth = Add<HConstant>(16);
new_capacity = AddInstruction(
HAdd::New(zone, context, new_capacity, min_growth));
- new_capacity->AssumeRepresentation(Representation::Integer32());
new_capacity->ClearFlag(HValue::kCanOverflow);
return new_capacity;
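
Editorial note on the arithmetic above: BuildNewElementsCapacity computes new = old + old/2 + 16, i.e. 1.5x growth with a fixed minimum step of 16. A quick standalone check (illustrative sketch, not patch code):

#include <cstdio>
#include <initializer_list>

int main() {
  for (int old_capacity : {0, 4, 16, 100}) {
    int half = old_capacity >> 1;           // HShr by GetConstant1
    int grown = old_capacity + half + 16;   // two HAdds, min_growth of 16
    std::printf("%3d -> %3d\n", old_capacity, grown);
  }
  // Prints: 0 -> 16, 4 -> 22, 16 -> 40, 100 -> 166.
}
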
@@ -1475,17 +1449,15 @@ HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
- Zone* zone = this->zone();
Heap* heap = isolate()->heap();
int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
: kPointerSize;
int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
max_size -= JSArray::kSize / element_size;
- HConstant* max_size_constant = new(zone) HConstant(max_size);
- AddInstruction(max_size_constant);
+ HConstant* max_size_constant = Add<HConstant>(max_size);
// Since we're forcing Integer32 representation for this HBoundsCheck,
// there's no need to Smi-check the index.
- AddInstruction(new(zone) HBoundsCheck(length, max_size_constant));
+ Add<HBoundsCheck>(length, max_size_constant);
}
@@ -1521,12 +1493,9 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
Factory* factory = isolate()->factory();
double nan_double = FixedDoubleArray::hole_nan_as_double();
- Zone* zone = this->zone();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
- ? AddInstruction(new(zone) HConstant(factory->the_hole_value(),
- Representation::Tagged()))
- : AddInstruction(new(zone) HConstant(nan_double,
- Representation::Double()));
+ ? Add<HConstant>(factory->the_hole_value())
+ : Add<HConstant>(nan_double);
// Special loop unfolding case
static const int kLoopUnfoldLimit = 4;
@@ -1553,15 +1522,15 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
if (unfold_loop) {
for (int i = 0; i < initial_capacity; i++) {
- HInstruction* key = AddInstruction(new(zone) HConstant(i));
- AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
+ HInstruction* key = Add<HConstant>(i);
+ Add<HStoreKeyed>(elements, key, hole, elements_kind);
}
} else {
LoopBuilder builder(this, context, LoopBuilder::kPostIncrement);
HValue* key = builder.BeginBody(from, to, Token::LT);
- AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
+ Add<HStoreKeyed>(elements, key, hole, elements_kind);
builder.EndBody();
}
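
For context (editorial sketch, not patch code): when the fill count is a graph-build-time constant no larger than kLoopUnfoldLimit, the builder emits one store per index instead of constructing loop blocks. Modeled in plain C++, with FillWithHole as an invented name:

#include <cstdio>

static const int kLoopUnfoldLimit = 4;

void FillWithHole(double* elements, int capacity, double hole) {
  if (capacity <= kLoopUnfoldLimit) {
    switch (capacity) {            // straight-line stores, no loop blocks
      case 4: elements[3] = hole;  // fall through
      case 3: elements[2] = hole;  // fall through
      case 2: elements[1] = hole;  // fall through
      case 1: elements[0] = hole;  // fall through
      case 0: break;
    }
  } else {
    for (int i = 0; i < capacity; ++i) elements[i] = hole;  // LoopBuilder path
  }
}

int main() {
  double buf[4] = {0, 0, 0, 0};
  FillWithHole(buf, 3, -1.0);
  std::printf("%g %g %g %g\n", buf[0], buf[1], buf[2], buf[3]);  // -1 -1 -1 0
}
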
@@ -1591,15 +1560,15 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
HValue* key = builder.BeginBody(graph()->GetConstant0(), length, Token::LT);
- HValue* element =
- AddInstruction(new(zone()) HLoadKeyed(from_elements, key, NULL,
- from_elements_kind,
- ALLOW_RETURN_HOLE));
+ HValue* element = Add<HLoadKeyed>(from_elements, key,
+ static_cast<HValue*>(NULL),
+ from_elements_kind,
+ ALLOW_RETURN_HOLE);
ElementsKind holey_kind = IsFastSmiElementsKind(to_elements_kind)
? FAST_HOLEY_ELEMENTS : to_elements_kind;
- HInstruction* holey_store = AddInstruction(
- new(zone()) HStoreKeyed(to_elements, key, element, holey_kind));
+ HInstruction* holey_store = Add<HStoreKeyed>(to_elements, key,
+ element, holey_kind);
// Allow NaN hole values to be converted to their tagged counterparts.
if (IsFastHoleyElementsKind(to_elements_kind)) {
holey_store->SetFlag(HValue::kAllowUndefinedAsNaN);
@@ -1620,8 +1589,6 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
AllocationSiteMode mode,
ElementsKind kind,
int length) {
- Zone* zone = this->zone();
-
NoObservableSideEffectsScope no_effects(this);
// All sizes here are multiples of kPointerSize.
@@ -1639,13 +1606,11 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
HAllocate::Flags allocate_flags = HAllocate::DefaultFlags(kind);
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
- HValue* size_in_bytes =
- AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
- HInstruction* object =
- AddInstruction(new(zone) HAllocate(context,
- size_in_bytes,
- HType::JSObject(),
- allocate_flags));
+ HValue* size_in_bytes = Add<HConstant>(size);
+ HInstruction* object = Add<HAllocate>(context,
+ size_in_bytes,
+ HType::JSObject(),
+ allocate_flags);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -1664,8 +1629,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
HValue* boilerplate_elements = AddLoadElements(boilerplate);
- HValue* object_elements =
- AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
+ HValue* object_elements = Add<HInnerAllocatedObject>(object, elems_offset);
AddStore(object, HObjectAccess::ForElementsPointer(), object_elements);
// Copy the elements array header.
@@ -1679,16 +1643,10 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// copying loops with constant length up to a given boundary and use this
// helper here instead.
for (int i = 0; i < length; i++) {
- HValue* key_constant = AddInstruction(new(zone) HConstant(i));
- HInstruction* value =
- AddInstruction(new(zone) HLoadKeyed(boilerplate_elements,
- key_constant,
- NULL,
- kind));
- AddInstruction(new(zone) HStoreKeyed(object_elements,
- key_constant,
- value,
- kind));
+ HValue* key_constant = Add<HConstant>(i);
+ HInstruction* value = Add<HLoadKeyed>(boilerplate_elements, key_constant,
+ static_cast<HValue*>(NULL), kind);
+ Add<HStoreKeyed>(object_elements, key_constant, value, kind);
}
}
@@ -1698,39 +1656,35 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
void HGraphBuilder::BuildCompareNil(
HValue* value,
- CompareNilICStub::Types types,
- Handle<Map> map,
+ Handle<Type> type,
int position,
HIfContinuation* continuation) {
IfBuilder if_nil(this, position);
bool needs_or = false;
- if (types.Contains(CompareNilICStub::NULL_TYPE)) {
+ if (type->Maybe(Type::Null())) {
if (needs_or) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
needs_or = true;
}
- if (types.Contains(CompareNilICStub::UNDEFINED)) {
+ if (type->Maybe(Type::Undefined())) {
if (needs_or) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value,
graph()->GetConstantUndefined());
needs_or = true;
}
- // Handle either undetectable or monomorphic, not both.
- ASSERT(!types.Contains(CompareNilICStub::UNDETECTABLE) ||
- !types.Contains(CompareNilICStub::MONOMORPHIC_MAP));
- if (types.Contains(CompareNilICStub::UNDETECTABLE)) {
+ if (type->Maybe(Type::Undetectable())) {
if (needs_or) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
} else {
if_nil.Then();
if_nil.Else();
- if (!map.is_null() && types.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
- BuildCheckNonSmi(value);
+ if (type->NumClasses() == 1) {
+ BuildCheckHeapObject(value);
// For ICs, the map checked below is a sentinel map that gets replaced by
// the monomorphic map when the code is used as a template to generate a
// new IC. For optimized functions, there is no sentinel map, the map
// emitted below is the actual monomorphic map.
- BuildCheckMap(value, map);
+ BuildCheckMap(value, type->Classes().Current());
} else {
if_nil.Deopt();
}
@@ -1743,9 +1697,8 @@ void HGraphBuilder::BuildCompareNil(
HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
int previous_object_size,
HValue* payload) {
- HInnerAllocatedObject* alloc_site = new(zone())
- HInnerAllocatedObject(previous_object, previous_object_size);
- AddInstruction(alloc_site);
+ HInnerAllocatedObject* alloc_site = Add<HInnerAllocatedObject>(
+ previous_object, previous_object_size);
Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
AddStoreMapConstant(alloc_site, alloc_site_map);
HObjectAccess access = HObjectAccess::ForAllocationSitePayload();
@@ -1756,8 +1709,7 @@ HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
// Get the global context, then the native context
- HInstruction* global_object = AddInstruction(new(zone())
- HGlobalObject(context));
+ HInstruction* global_object = Add<HGlobalObject>(context);
HObjectAccess access = HObjectAccess::ForJSObjectOffset(
GlobalObject::kNativeContextOffset);
return AddLoad(global_object, access);
@@ -1766,23 +1718,23 @@ HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
HInstruction* HGraphBuilder::BuildGetArrayFunction(HValue* context) {
HInstruction* native_context = BuildGetNativeContext(context);
- HInstruction* index = AddInstruction(new(zone())
- HConstant(Context::ARRAY_FUNCTION_INDEX, Representation::Integer32()));
-
- return AddInstruction(new (zone())
- HLoadKeyed(native_context, index, NULL, FAST_ELEMENTS));
+ HInstruction* index =
+ Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
+ return Add<HLoadKeyed>(
+ native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
}
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
- ElementsKind kind,
- HValue* allocation_site_payload,
- bool disable_allocation_sites) :
+ ElementsKind kind,
+ HValue* allocation_site_payload,
+ HValue* constructor_function,
+ AllocationSiteOverrideMode override_mode) :
builder_(builder),
kind_(kind),
allocation_site_payload_(allocation_site_payload),
- constructor_function_(NULL) {
- mode_ = disable_allocation_sites
+ constructor_function_(constructor_function) {
+ mode_ = override_mode == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
: AllocationSiteInfo::GetMode(kind);
}
@@ -1800,26 +1752,35 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
- HInstruction* native_context = builder()->BuildGetNativeContext(context);
+ if (kind_ == GetInitialFastElementsKind()) {
+ // No need for a context lookup if the kind_ matches the initial
+ // map, because we can just load the map in that case.
+ HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* load =
+ builder()->BuildLoadNamedField(constructor_function_,
+ access,
+ Representation::Tagged());
+ return builder()->AddInstruction(load);
+ }
- HInstruction* index = builder()->AddInstruction(new(zone())
- HConstant(Context::JS_ARRAY_MAPS_INDEX, Representation::Integer32()));
+ HInstruction* native_context = builder()->BuildGetNativeContext(context);
+ HInstruction* index = builder()->Add<HConstant>(
+ static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
- HInstruction* map_array = builder()->AddInstruction(new(zone())
- HLoadKeyed(native_context, index, NULL, FAST_ELEMENTS));
+ HInstruction* map_array = builder()->Add<HLoadKeyed>(
+ native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- HInstruction* kind_index = builder()->AddInstruction(new(zone())
- HConstant(kind_, Representation::Integer32()));
+ HInstruction* kind_index = builder()->Add<HConstant>(kind_);
- return builder()->AddInstruction(new(zone())
- HLoadKeyed(map_array, kind_index, NULL, FAST_ELEMENTS));
+ return builder()->Add<HLoadKeyed>(
+ map_array, kind_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
}
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return AddInstruction(
+ return builder()->AddInstruction(
builder()->BuildLoadNamedField(constructor_function_,
access,
Representation::Tagged()));
@@ -1842,22 +1803,17 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
base_size += FixedArray::kHeaderSize;
}
- HInstruction* elements_size_value = new(zone())
- HConstant(elements_size(), Representation::Integer32());
- AddInstruction(elements_size_value);
+ HInstruction* elements_size_value =
+ builder()->Add<HConstant>(elements_size());
HInstruction* mul = HMul::New(zone(), context, length_node,
elements_size_value);
- mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
- AddInstruction(mul);
+ builder()->AddInstruction(mul);
- HInstruction* base = new(zone()) HConstant(base_size,
- Representation::Integer32());
- AddInstruction(base);
+ HInstruction* base = builder()->Add<HConstant>(base_size);
HInstruction* total_size = HAdd::New(zone(), context, base, mul);
- total_size->AssumeRepresentation(Representation::Integer32());
total_size->ClearFlag(HValue::kCanOverflow);
- AddInstruction(total_size);
+ builder()->AddInstruction(total_size);
return total_size;
}
@@ -1872,18 +1828,13 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() {
? FixedDoubleArray::SizeFor(initial_capacity())
: FixedArray::SizeFor(initial_capacity());
- HConstant* array_size =
- new(zone()) HConstant(base_size, Representation::Integer32());
- AddInstruction(array_size);
- return array_size;
+ return builder()->Add<HConstant>(base_size);
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
HValue* size_in_bytes = EstablishEmptyArrayAllocationSize();
- HConstant* capacity =
- new(zone()) HConstant(initial_capacity(), Representation::Integer32());
- AddInstruction(capacity);
+ HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
return AllocateArray(size_in_bytes,
capacity,
builder()->graph()->GetConstant0(),
@@ -1907,13 +1858,12 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
// Allocate (dealing with failure appropriately)
HAllocate::Flags flags = HAllocate::DefaultFlags(kind_);
- HAllocate* new_object = new(zone()) HAllocate(context, size_in_bytes,
- HType::JSArray(), flags);
- AddInstruction(new_object);
+ HAllocate* new_object = builder()->Add<HAllocate>(context, size_in_bytes,
+ HType::JSArray(), flags);
// Fill in the fields: map, properties, length
HValue* map;
- if (constructor_function_ != NULL) {
+ if (allocation_site_payload_ == NULL) {
map = EmitInternalMapCode();
} else {
map = EmitMapCode(context);
@@ -1940,10 +1890,7 @@ HStoreNamedField* HGraphBuilder::AddStore(HValue *object,
HObjectAccess access,
HValue *val,
Representation representation) {
- HStoreNamedField *instr = new(zone())
- HStoreNamedField(object, access, val, representation);
- AddInstruction(instr);
- return instr;
+ return Add<HStoreNamedField>(object, access, val, representation);
}
@@ -1951,21 +1898,14 @@ HLoadNamedField* HGraphBuilder::AddLoad(HValue *object,
HObjectAccess access,
HValue *typecheck,
Representation representation) {
- HLoadNamedField *instr =
- new(zone()) HLoadNamedField(object, access, typecheck, representation);
- AddInstruction(instr);
- return instr;
+ return Add<HLoadNamedField>(object, access, typecheck, representation);
}
HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
Handle<Map> map) {
- HValue* constant =
- AddInstruction(new(zone()) HConstant(map, Representation::Tagged()));
- HStoreNamedField *instr =
- new(zone()) HStoreNamedField(object, HObjectAccess::ForMap(), constant);
- AddInstruction(instr);
- return instr;
+ return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
+ Add<HConstant>(map));
}
@@ -1977,7 +1917,8 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
break_scope_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- inline_bailout_(false) {
+ inline_bailout_(false),
+ osr_(new(info->zone()) HOsrBuilder(this)) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
@@ -2045,6 +1986,7 @@ HGraph::HGraph(CompilationInfo* info)
values_(16, info->zone()),
phi_list_(NULL),
uint32_instructions_(NULL),
+ osr_(NULL),
info_(info),
zone_(info->zone()),
is_recursive_(false),
@@ -2080,10 +2022,8 @@ void HGraph::FinalizeUniqueValueIds() {
DisallowHeapAllocation no_gc;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
- instr->FinalizeUniqueValueId();
+ for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
+ it.Current()->FinalizeUniqueValueId();
}
}
}
@@ -2095,24 +2035,22 @@ void HGraph::Canonicalize() {
// We must be careful not to set the flag unnecessarily, because GVN
// cannot identify two instructions when their flag value differs.
for (int i = 0; i < blocks()->length(); ++i) {
- HInstruction* instr = blocks()->at(i)->first();
- while (instr != NULL) {
+ for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
if (instr->IsArithmeticBinaryOperation() &&
instr->representation().IsInteger32() &&
instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToInt32)) {
instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
}
- instr = instr->next();
}
}
// Perform actual Canonicalization pass.
for (int i = 0; i < blocks()->length(); ++i) {
- HInstruction* instr = blocks()->at(i)->first();
- while (instr != NULL) {
+ for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
HValue* value = instr->Canonicalize();
if (value != instr) instr->DeleteAndReplaceWith(value);
- instr = instr->next();
}
}
}
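
The flag rule in the first Canonicalize loop above is worth restating: an Integer32 arithmetic instruction gets kAllUsesTruncatingToInt32 only when it has at least one truncating use and none that need the full value. A standalone predicate capturing that condition (editorial sketch; AllUsesTruncating is an invented name):

#include <cstdio>
#include <vector>

// True when there is at least one truncating use and no non-truncating one,
// mirroring HasAtLeastOneUseWithFlagAndNoneWithout(kTruncatingToInt32).
bool AllUsesTruncating(const std::vector<bool>& use_truncates) {
  bool any = false;
  for (bool t : use_truncates) {
    if (!t) return false;
    any = true;
  }
  return any;
}

int main() {
  std::printf("%d %d %d\n",
              AllUsesTruncating({true, true}),   // 1: flag may be set
              AllUsesTruncating({true, false}),  // 0: one use needs full value
              AllUsesTruncating({}));            // 0: no truncating use at all
}
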
@@ -2413,7 +2351,7 @@ class PostorderProcessor : public ZoneObject {
void HGraph::OrderBlocks() {
- HPhase phase("H_Block ordering", isolate());
+ CompilationPhase phase("H_Block ordering", info());
BitVector visited(blocks_.length(), zone());
ZoneList<HBasicBlock*> reverse_result(8, zone());
@@ -2487,8 +2425,8 @@ void HGraph::NullifyUnreachableInstructions() {
}
}
if (all_predecessors_deoptimizing) nullify = true;
- for (HInstruction* instr = block->first(); instr != NULL;
- instr = instr->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
// Leave the basic structure of the graph intact.
if (instr->IsBlockEntry()) continue;
if (instr->IsControlInstruction()) continue;
@@ -2642,171 +2580,6 @@ void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
}
-class HRangeAnalysis BASE_EMBEDDED {
- public:
- explicit HRangeAnalysis(HGraph* graph) :
- graph_(graph), zone_(graph->zone()), changed_ranges_(16, zone_) { }
-
- void Analyze();
-
- private:
- void TraceRange(const char* msg, ...);
- void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
- void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferRange(HValue* value);
- void RollBackTo(int index);
- void AddRange(HValue* value, Range* range);
-
- HGraph* graph_;
- Zone* zone_;
- ZoneList<HValue*> changed_ranges_;
-};
-
-
-void HRangeAnalysis::TraceRange(const char* msg, ...) {
- if (FLAG_trace_range) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-void HRangeAnalysis::Analyze() {
- HPhase phase("H_Range analysis", graph_);
- Analyze(graph_->entry_block());
-}
-
-
-void HRangeAnalysis::Analyze(HBasicBlock* block) {
- TraceRange("Analyzing block B%d\n", block->block_id());
-
- int last_changed_range = changed_ranges_.length() - 1;
-
- // Infer range based on control flow.
- if (block->predecessors()->length() == 1) {
- HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsCompareIDAndBranch()) {
- InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
- }
- }
-
- // Process phi instructions.
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- InferRange(phi);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != block->end()) {
- InferRange(instr);
- instr = instr->next();
- }
-
- // Continue analysis in all dominated blocks.
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- Analyze(block->dominated_blocks()->at(i));
- }
-
- RollBackTo(last_changed_range);
-}
-
-
-void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
- HBasicBlock* dest) {
- ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->representation().IsInteger32()) {
- Token::Value op = test->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::ReverseCompareOp(op);
- UpdateControlFlowRange(op, test->left(), test->right());
- UpdateControlFlowRange(inverted_op, test->right(), test->left());
- }
-}
-
-
-// We know that value [op] other. Use this information to update the range on
-// value.
-void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
- Range temp_range;
- Range* range = other->range() != NULL ? other->range() : &temp_range;
- Range* new_range = NULL;
-
- TraceRange("Control flow range infer %d %s %d\n",
- value->id(),
- Token::Name(op),
- other->id());
-
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- // The same range has to apply for value.
- new_range = range->Copy(zone_);
- } else if (op == Token::LT || op == Token::LTE) {
- new_range = range->CopyClearLower(zone_);
- if (op == Token::LT) {
- new_range->AddConstant(-1);
- }
- } else if (op == Token::GT || op == Token::GTE) {
- new_range = range->CopyClearUpper(zone_);
- if (op == Token::GT) {
- new_range->AddConstant(1);
- }
- }
-
- if (new_range != NULL && !new_range->IsMostGeneric()) {
- AddRange(value, new_range);
- }
-}
-
-
-void HRangeAnalysis::InferRange(HValue* value) {
- ASSERT(!value->HasRange());
- if (!value->representation().IsNone()) {
- value->ComputeInitialRange(zone_);
- Range* range = value->range();
- TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
- value->id(),
- value->Mnemonic(),
- range->lower(),
- range->upper());
- }
-}
-
-
-void HRangeAnalysis::RollBackTo(int index) {
- for (int i = index + 1; i < changed_ranges_.length(); ++i) {
- changed_ranges_[i]->RemoveLastAddedRange();
- }
- changed_ranges_.Rewind(index + 1);
-}
-
-
-void HRangeAnalysis::AddRange(HValue* value, Range* range) {
- Range* original_range = value->range();
- value->AddNewRange(range, zone_);
- changed_ranges_.Add(value, zone_);
- Range* new_range = value->range();
- TraceRange("Updated range of %d set to [%d,%d]\n",
- value->id(),
- new_range->lower(),
- new_range->upper());
- if (original_range != NULL) {
- TraceRange("Original range was [%d,%d]\n",
- original_range->lower(),
- original_range->upper());
- }
- TraceRange("New information was [%d,%d]\n",
- range->lower(),
- range->upper());
-}
-
-
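The HRangeAnalysis machinery removed above is not deleted outright: it reappears as the dedicated HRangeAnalysisPhase run via Run<HRangeAnalysisPhase>() later in this patch. Its core trick is refining integer ranges from comparison outcomes: on an edge where "value < other" is known to hold, value's upper bound can be tightened to other's upper bound minus one. A minimal standalone sketch of that refinement, assuming a simplified Range with plain int bounds in place of V8's class:

#include <algorithm>
#include <climits>
#include <cstdio>

// Simplified stand-in for V8's Range: a closed integer interval.
struct Range {
  int lower;
  int upper;
};

// We learned "value < other" on this control-flow edge, so value's upper
// bound is at most other's upper bound minus one. This mirrors the
// CopyClearLower() + AddConstant(-1) steps in UpdateControlFlowRange above.
Range RefineLessThan(const Range& value, const Range& other) {
  Range result = value;
  if (other.upper != INT_MAX) {  // guard against overflow on "upper - 1"
    result.upper = std::min(result.upper, other.upper - 1);
  }
  return result;
}

int main() {
  Range value = {INT_MIN, INT_MAX};  // nothing known about value yet
  Range other = {0, 10};             // other is known to lie in [0, 10]
  Range refined = RefineLessThan(value, other);
  std::printf("refined to [%d, %d]\n", refined.lower, refined.upper);
  return 0;
}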
class HStackCheckEliminator BASE_EMBEDDED {
public:
explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
@@ -2830,13 +2603,11 @@ void HStackCheckEliminator::Process() {
HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
HBasicBlock* dominator = back_edge;
while (true) {
- HInstruction* instr = dominator->first();
- while (instr != NULL) {
- if (instr->IsCall()) {
+ for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
+ if (it.Current()->IsCall()) {
block->loop_information()->stack_check()->Eliminate();
break;
}
- instr = instr->next();
}
// Done when the loop header is processed.
@@ -2850,145 +2621,6 @@ void HStackCheckEliminator::Process() {
}
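A pattern repeated throughout this patch: manual "first()/next() until NULL" walks over a block's instructions are replaced with an HInstructionIterator exposing Done()/Current()/Advance(). A self-contained sketch of that iterator shape, with Node as an illustrative stand-in for HInstruction:

#include <cstdio>

// Illustrative stand-in for HInstruction's intrusive "next" chain.
struct Node {
  int id;
  Node* next;
};

// Mirrors the Done()/Current()/Advance() interface used by the new loops.
class InstructionIterator {
 public:
  explicit InstructionIterator(Node* first) : current_(first) {}
  bool Done() const { return current_ == nullptr; }
  Node* Current() const { return current_; }
  void Advance() { current_ = current_->next; }

 private:
  Node* current_;
};

int main() {
  Node c = {3, nullptr};
  Node b = {2, &c};
  Node a = {1, &b};
  for (InstructionIterator it(&a); !it.Done(); it.Advance()) {
    std::printf("instruction %d\n", it.Current()->id);
  }
  return 0;
}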
-void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsTagged()) return;
- if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
- if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current, zone());
- in_worklist_.Add(current->id());
-}
-
-
-void HInferRepresentation::Analyze() {
- HPhase phase("H_Infer representations", graph_);
-
- // (1) Initialize bit vectors and count real uses. Each phi gets a
- // bit-vector of length <number of phis>.
- const ZoneList<HPhi*>* phi_list = graph_->phi_list();
- int phi_count = phi_list->length();
- ZoneList<BitVector*> connected_phis(phi_count, graph_->zone());
- for (int i = 0; i < phi_count; ++i) {
- phi_list->at(i)->InitRealUses(i);
- BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
- connected_set->Add(i);
- connected_phis.Add(connected_set, zone());
- }
-
- // (2) Do a fixed point iteration to find the set of connected phis. A
- // phi is connected to another phi if its value is used either directly or
- // indirectly through a transitive closure of the def-use relation.
- bool change = true;
- while (change) {
- change = false;
- // We normally have far more "forward edges" than "backward edges",
- // so we terminate faster when we walk backwards.
- for (int i = phi_count - 1; i >= 0; --i) {
- HPhi* phi = phi_list->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (use->IsPhi()) {
- int id = HPhi::cast(use)->phi_id();
- if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
- change = true;
- }
- }
- }
- }
-
- // Set truncation flags for groups of connected phis. This is a conservative
- // approximation; the flag will be properly re-computed after representations
- // have been determined.
- if (phi_count > 0) {
- BitVector* done = new(zone()) BitVector(phi_count, graph_->zone());
- for (int i = 0; i < phi_count; ++i) {
- if (done->Contains(i)) continue;
-
- // Check if all uses of all connected phis in this group are truncating.
- bool all_uses_everywhere_truncating = true;
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- all_uses_everywhere_truncating &=
- phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
- done->Add(index);
- }
- if (all_uses_everywhere_truncating) {
- continue; // Great, nothing to do.
- }
- // Clear truncation flag of this group of connected phis.
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
- }
- }
- }
-
- // Simplify constant phi inputs where possible.
- // This step uses kTruncatingToInt32 flags of phis.
- for (int i = 0; i < phi_count; ++i) {
- phi_list->at(i)->SimplifyConstantInputs();
- }
-
- // Use the phi reachability information from step 2 to
- // sum up the non-phi use counts of all connected phis.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- HPhi* it_use = phi_list->at(index);
- if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
- }
- }
-
- // Initialize work list
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- AddToWorklist(phis->at(j));
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- AddToWorklist(current);
- current = current->next();
- }
- }
-
- // Do a fixed point iteration, trying to improve representations
- while (!worklist_.is_empty()) {
- HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
- current->InferRepresentation(this);
- }
-
- // Lastly: any instruction that we don't have representation information
- // for defaults to Tagged.
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- HPhi* phi = phis->at(j);
- if (phi->representation().IsNone()) {
- phi->ChangeRepresentation(Representation::Tagged());
- }
- }
- for (HInstruction* current = block->first();
- current != NULL; current = current->next()) {
- if (current->representation().IsNone() &&
- current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
- current->ChangeRepresentation(Representation::Tagged());
- }
- }
- }
-}
-
-
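The HInferRepresentation analysis removed above moves into HInferRepresentationPhase (invoked below via Run<HInferRepresentationPhase>()). Step (2) of the removed code is a classic fixed-point computation: each phi carries a bit set of connected phis, and sets are unioned along phi-to-phi uses until nothing changes. A compact sketch of that fixed point, using vector<bool> sets and a hypothetical use list in place of V8's BitVector and HUseIterator:

#include <cstdio>
#include <vector>

int main() {
  const int phi_count = 4;
  // uses[i] lists the phis that use phi i (hypothetical def-use edges).
  std::vector<std::vector<int>> uses = {{1}, {2}, {}, {2}};

  // connected[i] is phi i's bit set; initially each phi is connected to
  // itself, as in InitRealUses/connected_set->Add(i) above.
  std::vector<std::vector<bool>> connected(
      phi_count, std::vector<bool>(phi_count, false));
  for (int i = 0; i < phi_count; ++i) connected[i][i] = true;

  bool change = true;
  while (change) {
    change = false;
    // Walk backwards: with mostly forward edges this converges faster.
    for (int i = phi_count - 1; i >= 0; --i) {
      for (int user : uses[i]) {
        // Union the using phi's set into this phi's set.
        for (int b = 0; b < phi_count; ++b) {
          if (connected[user][b] && !connected[i][b]) {
            connected[i][b] = true;
            change = true;
          }
        }
      }
    }
  }

  for (int i = 0; i < phi_count; ++i) {
    std::printf("phi %d connected to:", i);
    for (int b = 0; b < phi_count; ++b) {
      if (connected[i][b]) std::printf(" %d", b);
    }
    std::printf("\n");
  }
  return 0;
}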
void HGraph::MergeRemovableSimulates() {
HPhase phase("H_Merge removable simulates", this);
ZoneList<HSimulate*> mergelist(2, zone());
@@ -2999,8 +2631,8 @@ void HGraph::MergeRemovableSimulates() {
// Nasty heuristic: Never remove the first simulate in a block. This
// just so happens to have a beneficial effect on register allocation.
bool first = true;
- for (HInstruction* current = block->first();
- current != NULL; current = current->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
if (current->IsLeaveInlined()) {
// Never fold simulates from inlined environments into simulates
// in the outer environment.
@@ -3014,6 +2646,15 @@ void HGraph::MergeRemovableSimulates() {
}
continue;
}
+ if (current->IsReturn()) {
+ // Drop mergeable simulates in the list. This is safe because
+ // simulates after instructions with side effects are never added
+ // to the merge list.
+ while (!mergelist.is_empty()) {
+ mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+ }
+ continue;
+ }
// Skip the non-simulates and the first simulate.
if (!current->IsSimulate()) continue;
if (first) {
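The new IsReturn arm added in this hunk drops any pending mergeable simulates rather than folding them into a simulate after the return. A toy sketch of that list discipline, modeling instructions as plain tags ('S' mergeable simulate, 'R' return, 'X' anything else):

#include <cstdio>
#include <vector>

int main() {
  std::vector<char> block = {'S', 'S', 'R', 'S', 'S', 'X'};
  std::vector<int> mergelist;  // indices of pending mergeable simulates
  for (int i = 0; i < static_cast<int>(block.size()); ++i) {
    if (block[i] == 'R') {
      mergelist.clear();  // drop pending simulates, as the IsReturn arm does
    } else if (block[i] == 'S') {
      mergelist.push_back(i);  // candidate for merging into a later simulate
    }
  }
  std::printf("%zu simulate(s) still pending at block end\n",
              mergelist.size());
  return 0;
}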
@@ -3058,10 +2699,8 @@ void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
phis->at(j)->UpdateInferredType();
}
- HInstruction* current = block->first();
- while (current != NULL) {
- current->UpdateInferredType();
- current = current->next();
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ it.Current()->UpdateInferredType();
}
if (block->IsLoopHeader()) {
@@ -3289,235 +2928,12 @@ void HGraph::MarkDeoptimizeOnUndefined() {
}
-// Discover instructions that can be marked with the kUint32 flag, allowing
-// them to produce full-range uint32 values.
-class Uint32Analysis BASE_EMBEDDED {
- public:
- explicit Uint32Analysis(Zone* zone) : zone_(zone), phis_(4, zone) { }
-
- void Analyze(HInstruction* current);
-
- void UnmarkUnsafePhis();
-
- private:
- bool IsSafeUint32Use(HValue* val, HValue* use);
- bool Uint32UsesAreSafe(HValue* uint32val);
- bool CheckPhiOperands(HPhi* phi);
- void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist);
-
- Zone* zone_;
- ZoneList<HPhi*> phis_;
-};
-
-
-bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) {
-  // Operations that operate on bits are safe.
- if (use->IsBitwise() ||
- use->IsShl() ||
- use->IsSar() ||
- use->IsShr() ||
- use->IsBitNot()) {
- return true;
- } else if (use->IsChange() || use->IsSimulate()) {
-    // Conversions and deoptimization have special support for uint32.
- return true;
- } else if (use->IsStoreKeyed()) {
- HStoreKeyed* store = HStoreKeyed::cast(use);
- if (store->is_external()) {
-      // Storing a value into an external integer array is a bit-level
-      // operation.
- if (store->value() == val) {
-        // Clamping or a conversion to double should have been inserted.
- ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
- return true;
- }
- }
- }
-
- return false;
-}
-
-
-// Iterate over all uses and verify that they are uint32 safe: either they
-// don't distinguish between int32 and uint32 due to their bitwise nature or
-// they have special support for uint32 values.
-// Encountered phis are optimistically treated as safe uint32 uses, marked
-// with the kUint32 flag and collected in the phis_ list. A separate pass
-// will be performed later by UnmarkUnsafePhis to clear kUint32 from phis
-// that are not actually uint32-safe (this requires fixed-point iteration).
-bool Uint32Analysis::Uint32UsesAreSafe(HValue* uint32val) {
- bool collect_phi_uses = false;
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
- if (use->IsPhi()) {
- if (!use->CheckFlag(HInstruction::kUint32)) {
-        // There is a phi use of this value from a phi that is not yet
-        // collected in the phis_ array. A separate pass is required.
- collect_phi_uses = true;
- }
-
- // Optimistically treat phis as uint32 safe.
- continue;
- }
-
- if (!IsSafeUint32Use(uint32val, use)) {
- return false;
- }
- }
-
- if (collect_phi_uses) {
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
-      // There is a phi use of this value from a phi that is not yet
-      // collected in the phis_ array. A separate pass is required.
- if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
- use->SetFlag(HInstruction::kUint32);
- phis_.Add(HPhi::cast(use), zone_);
- }
- }
- }
-
- return true;
-}
-
-
-// Analyze instruction and mark it with kUint32 if all its uses are uint32
-// safe.
-void Uint32Analysis::Analyze(HInstruction* current) {
- if (Uint32UsesAreSafe(current)) current->SetFlag(HInstruction::kUint32);
-}
-
-
-// Check if all operands to the given phi are marked with kUint32 flag.
-bool Uint32Analysis::CheckPhiOperands(HPhi* phi) {
- if (!phi->CheckFlag(HInstruction::kUint32)) {
- // This phi is not uint32 safe. No need to check operands.
- return false;
- }
-
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (!operand->CheckFlag(HInstruction::kUint32)) {
-      // Lazily mark constants that fit into uint32 range with kUint32 flag.
- if (operand->IsInteger32Constant() &&
- operand->GetInteger32Constant() >= 0) {
- operand->SetFlag(HInstruction::kUint32);
- continue;
- }
-
- // This phi is not safe, some operands are not uint32 values.
- return false;
- }
- }
-
- return true;
-}
-
-
-// Remove the kUint32 flag from the phi itself and its operands. If any
-// operand was a phi marked with kUint32, place it into a worklist for
-// transitive clearing of the kUint32 flag.
-void Uint32Analysis::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
- phi->ClearFlag(HInstruction::kUint32);
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (operand->CheckFlag(HInstruction::kUint32)) {
- operand->ClearFlag(HInstruction::kUint32);
- if (operand->IsPhi()) {
- worklist->Add(HPhi::cast(operand), zone_);
- }
- }
- }
-}
-
-
-void Uint32Analysis::UnmarkUnsafePhis() {
- // No phis were collected. Nothing to do.
- if (phis_.length() == 0) return;
-
- // Worklist used to transitively clear kUint32 from phis that
- // are used as arguments to other phis.
- ZoneList<HPhi*> worklist(phis_.length(), zone_);
-
-  // A phi can be used as a uint32 value if and only if
-  // all its operands are uint32 values and all its
-  // uses are uint32 safe.
-
-  // Iterate over the collected phis and unmark those that are unsafe.
-  // When unmarking a phi, unmark its operands and add them to the
-  // worklist if they are phis as well.
-  // Phis that are still marked as safe are shifted down
-  // so that all safe phis form a prefix of the phis_ array.
- int phi_count = 0;
- for (int i = 0; i < phis_.length(); i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
- phis_[phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
-
-  // Now the phis_ array contains only those phis that have safe
-  // non-phi uses. Start transitively clearing the kUint32 flag
-  // from phi operands of discovered non-safe phis until
-  // only safe phis are left.
- while (!worklist.is_empty()) {
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- UnmarkPhi(phi, &worklist);
- }
-
-    // Check if any operands of safe phis were unmarked,
-    // turning a safe phi into an unsafe one. The same value
-    // can flow into several phis.
- int new_phi_count = 0;
- for (int i = 0; i < phi_count; i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi)) {
- phis_[new_phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
- phi_count = new_phi_count;
- }
-}
-
-
-void HGraph::ComputeSafeUint32Operations() {
- HPhase phase("H_Compute safe UInt32 operations", this);
- if (uint32_instructions_ == NULL) return;
-
- Uint32Analysis analysis(zone());
- for (int i = 0; i < uint32_instructions_->length(); ++i) {
- HInstruction* current = uint32_instructions_->at(i);
- if (current->IsLinked() && current->representation().IsInteger32()) {
- analysis.Analyze(current);
- }
- }
-
- // Some phis might have been optimistically marked with kUint32 flag.
- // Remove this flag from those phis that are unsafe and propagate
- // this information transitively potentially clearing kUint32 flag
- // from some non-phi operations that are used as operands to unsafe phis.
- analysis.UnmarkUnsafePhis();
-}
-
-
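The Uint32Analysis code removed above becomes HUint32AnalysisPhase (see Run<HUint32AnalysisPhase>() below). It hinges on the fact that a bit-level user cannot observe whether its operand is read as int32 or uint32, because both interpretations share one bit pattern. A small demonstration of that equivalence:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t as_uint = 0xFFFFFFFFu;                  // 4294967295
  int32_t as_int = static_cast<int32_t>(as_uint);  // -1, same bit pattern

  // A bitwise operation yields identical bits either way, which is why
  // IsSafeUint32Use above accepts bitwise/shift users unconditionally.
  std::printf("%08x %08x\n",
              static_cast<unsigned>(as_uint & 0xFF00u),
              static_cast<unsigned>(as_int & 0xFF00));
  return 0;
}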
void HGraph::ComputeMinusZeroChecks() {
HPhase phase("H_Compute minus zero checks", this);
BitVector visited(GetMaximumValueID(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
- for (HInstruction* current = blocks_[i]->first();
- current != NULL;
- current = current->next()) {
+ for (HInstructionIterator it(blocks_[i]); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
if (current->IsChange()) {
HChange* change = HChange::cast(current);
// Propagate flags for negative zero checks upwards from conversions
@@ -3550,6 +2966,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
+ arguments_object_(NULL),
arguments_elements_(NULL),
outer_(owner->function_state()) {
if (outer_ != NULL) {
@@ -3822,8 +3239,15 @@ void TestContext::BuildBranch(HValue* value) {
} while (false)
+#define CHECK_ALIVE_OR_RETURN(call, value) \
+ do { \
+ call; \
+ if (HasStackOverflow() || current_block() == NULL) return value; \
+ } while (false)
+
+
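Like CHECK_ALIVE before it, the new CHECK_ALIVE_OR_RETURN wraps its body in do { ... } while (false), the standard trick for making a multi-statement macro behave as a single statement. A standalone sketch of the idiom with a hypothetical CHECK_OR_RETURN macro:

#include <cstdio>

// do/while(false) makes the macro a single statement, so it composes
// safely with unbraced if/else, just like CHECK_ALIVE_OR_RETURN above.
#define CHECK_OR_RETURN(cond, value) \
  do {                               \
    if (!(cond)) return value;       \
  } while (false)

int Half(int n) {
  if (n >= 0)
    CHECK_OR_RETURN(n % 2 == 0, -1);  // expands safely inside if/else
  else
    return -1;
  return n / 2;
}

int main() {
  std::printf("%d %d\n", Half(10), Half(7));  // prints: 5 -1
  return 0;
}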
void HOptimizedGraphBuilder::Bailout(const char* reason) {
- info()->set_bailout_reason(reason);
+ current_info()->set_bailout_reason(reason);
SetStackOverflow();
}
@@ -3859,7 +3283,7 @@ void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
- Push(AddInstruction(new(zone()) HPushArgument(Pop())));
+ Push(Add<HPushArgument>(Pop()));
}
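This hunk shows the patch's recurring refactor: the verbose AddInstruction(new(zone()) HFoo(args)) spelling collapses into Add<HFoo>(args). A sketch of how such a variadic factory helper can look, with Builder and Instruction as illustrative stand-ins; V8's real helper allocates in a Zone rather than via unique_ptr:

#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

struct Instruction {
  virtual ~Instruction() = default;
};

class Builder {
 public:
  // Allocate an instruction and append it to the block in one step,
  // returning the concrete type so callers can keep configuring it.
  template <typename Instr, typename... Args>
  Instr* Add(Args&&... args) {
    auto instr = std::make_unique<Instr>(std::forward<Args>(args)...);
    Instr* raw = instr.get();
    instructions_.push_back(std::move(instr));  // the "AddInstruction" part
    return raw;
  }

 private:
  std::vector<std::unique_ptr<Instruction>> instructions_;
};

struct HPushArgument : Instruction {
  explicit HPushArgument(int value) : value_(value) {}
  int value_;
};

int main() {
  Builder builder;
  HPushArgument* arg = builder.Add<HPushArgument>(42);
  std::printf("pushed argument %d\n", arg->value_);
  return 0;
}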
@@ -3880,11 +3304,11 @@ void HOptimizedGraphBuilder::VisitExpressions(
bool HOptimizedGraphBuilder::BuildGraph() {
- if (info()->function()->is_generator()) {
+ if (current_info()->function()->is_generator()) {
Bailout("function is a generator");
return false;
}
- Scope* scope = info()->scope();
+ Scope* scope = current_info()->scope();
if (scope->HasIllegalRedeclaration()) {
Bailout("function with illegal redeclaration");
return false;
@@ -3925,10 +3349,9 @@ bool HOptimizedGraphBuilder::BuildGraph() {
AddSimulate(BailoutId::Declarations());
HValue* context = environment()->LookupContext();
- AddInstruction(
- new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+ Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
- VisitStatements(info()->function()->body());
+ VisitStatements(current_info()->function()->body());
if (HasStackOverflow()) return false;
if (current_block() != NULL) {
@@ -3940,7 +3363,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
// last time this function was compiled, then this recompile is likely not
// due to missing/inadequate type feedback, but rather too aggressive
// optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
+ Handle<Code> unoptimized_code(current_info()->shared_info()->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Handle<TypeFeedbackInfo> type_info(
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
@@ -3950,22 +3373,10 @@ bool HOptimizedGraphBuilder::BuildGraph() {
!type_info->matches_inlined_type_change_checksum(composite_checksum));
type_info->set_inlined_type_change_checksum(composite_checksum);
- return true;
-}
-
+ // Perform any necessary OSR-specific cleanups or changes to the graph.
+ osr_->FinishGraph();
-// Perform common subexpression elimination and loop-invariant code motion.
-void HGraph::GlobalValueNumbering() {
- HPhase phase("H_Global value numbering", this);
- HGlobalValueNumberer gvn(this, info());
- bool removed_side_effects = gvn.Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values that
- // could only be discovered by removing side-effect-generating instructions
- // during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects) {
- removed_side_effects = gvn.Analyze();
- ASSERT(!removed_side_effects);
- }
+ return true;
}
@@ -3985,9 +3396,8 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
Verify(true);
#endif
- if (FLAG_analyze_environment_liveness) {
- EnvironmentSlotLivenessAnalyzer esla(this);
- esla.AnalyzeAndTrim();
+ if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) {
+ Run<HEnvironmentLivenessAnalysisPhase>();
}
PropagateDeoptimizingMark();
@@ -4009,16 +3419,9 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
}
CollectPhis();
- if (has_osr_loop_entry()) {
- const ZoneList<HPhi*>* phis = osr_loop_entry()->phis();
- for (int j = 0; j < phis->length(); j++) {
- HPhi* phi = phis->at(j);
- osr_values()->at(phi->merged_index())->set_incoming_value(phi);
- }
- }
+ if (has_osr()) osr()->FinishOsrValues();
- HInferRepresentation rep(this);
- rep.Analyze();
+ Run<HInferRepresentationPhase>();
// Remove HSimulate instructions that have turned out not to be needed
// after all by folding them into the following HSimulate.
@@ -4033,16 +3436,16 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
   // Must be performed before canonicalization to ensure that Canonicalize
   // will not remove semantically meaningful ToInt32 operations, e.g. BIT_OR
   // with zero.
- if (FLAG_opt_safe_uint32_operations) ComputeSafeUint32Operations();
+ if (FLAG_opt_safe_uint32_operations) Run<HUint32AnalysisPhase>();
if (FLAG_use_canonicalizing) Canonicalize();
- if (FLAG_use_gvn) GlobalValueNumbering();
+ if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
+
+ if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
+
+ if (FLAG_use_range) Run<HRangeAnalysisPhase>();
- if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(this);
- rangeAnalysis.Analyze();
- }
ComputeMinusZeroChecks();
// Eliminate redundant stack checks on backwards branches.
@@ -4073,7 +3476,8 @@ void HGraph::SetupInformativeDefinitionsInBlock(HBasicBlock* block) {
ASSERT(!phi->IsInformativeDefinition());
}
- for (HInstruction* i = block->first(); i != NULL; i = i->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* i = it.Current();
i->AddInformativeDefinitions();
i->SetFlag(HValue::kIDefsProcessingDone);
i->UpdateRedefinedUsesWhileSettingUpInformativeDefinitions();
@@ -4091,7 +3495,8 @@ void HGraph::SetupInformativeDefinitionsRecursively(HBasicBlock* block) {
SetupInformativeDefinitionsRecursively(block->dominated_blocks()->at(i));
}
- for (HInstruction* i = block->first(); i != NULL; i = i->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* i = it.Current();
if (i->IsBoundsCheck()) {
HBoundsCheck* check = HBoundsCheck::cast(i);
check->ApplyIndexChange();
@@ -4394,7 +3799,8 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
BoundsCheckTable* table) {
BoundsCheckBbData* bb_data_list = NULL;
- for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
+ for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
+ HInstruction* i = it.Current();
if (!i->IsBoundsCheck()) continue;
HBoundsCheck* check = HBoundsCheck::cast(i);
@@ -4516,9 +3922,8 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
void HGraph::DehoistSimpleArrayIndexComputations() {
HPhase phase("H_Dehoist index computations", this);
for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
+ for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
ArrayInstructionInterface* array_instruction = NULL;
if (instr->IsLoadKeyed()) {
HLoadKeyed* op = HLoadKeyed::cast(instr);
@@ -4548,9 +3953,8 @@ void HGraph::MarkLiveInstructions() {
// Mark initial root instructions for dead code elimination.
for (int i = 0; i < blocks()->length(); ++i) {
HBasicBlock* block = blocks()->at(i);
- for (HInstruction* instr = block->first();
- instr != NULL;
- instr = instr->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
if (instr->CannotBeEliminated()) MarkLive(NULL, instr, &worklist);
}
for (int j = 0; j < block->phis()->length(); j++) {
@@ -4596,9 +4000,8 @@ void HGraph::RemoveDeadInstructions() {
// Remove any instruction not marked kIsLive.
for (int i = 0; i < blocks()->length(); ++i) {
HBasicBlock* block = blocks()->at(i);
- for (HInstruction* instr = block->first();
- instr != NULL;
- instr = instr->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
if (!instr->CheckFlag(HValue::kIsLive)) {
// Instruction has not been marked live; assume it is dead and remove.
// TODO(titzer): we don't remove constants because some special ones
@@ -4643,9 +4046,8 @@ void HGraph::RestoreActualValues() {
}
#endif
- for (HInstruction* instruction = block->first();
- instruction != NULL;
- instruction = instruction->next()) {
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
if (instruction->ActualValue() != instruction) {
ASSERT(instruction->IsInformativeDefinition());
if (instruction->IsPurelyInformativeDefinition()) {
@@ -4659,12 +4061,6 @@ void HGraph::RestoreActualValues() {
}
-void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddPhi(instr);
-}
-
-
void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
Push(instr);
AddInstruction(instr);
@@ -4672,9 +4068,11 @@ void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
void HOptimizedGraphBuilder::AddSoftDeoptimize() {
+ isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return;
if (current_block()->IsDeoptimizing()) return;
- AddInstruction(new(zone()) HSoftDeoptimize());
+ Add<HSoftDeoptimize>();
+ isolate()->counters()->soft_deopts_inserted()->Increment();
current_block()->MarkAsDeoptimizing();
graph()->set_has_soft_deoptimize(true);
}
@@ -4689,33 +4087,32 @@ HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
}
while (!arguments.is_empty()) {
- AddInstruction(new(zone()) HPushArgument(arguments.RemoveLast()));
+ Add<HPushArgument>(arguments.RemoveLast());
}
return call;
}
void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
- HConstant* undefined_constant = new(zone()) HConstant(
- isolate()->factory()->undefined_value(), Representation::Tagged());
- AddInstruction(undefined_constant);
+ HConstant* undefined_constant = Add<HConstant>(
+ isolate()->factory()->undefined_value());
graph()->set_undefined_constant(undefined_constant);
- HArgumentsObject* object = new(zone()) HArgumentsObject;
- AddInstruction(object);
- graph()->SetArgumentsObject(object);
-
- // Set the initial values of parameters including "this". "This" has
- // parameter index 0.
+  // Create an arguments object containing the initial parameters. Set the
+  // initial values of parameters, including "this", which has parameter
+  // index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
-
+ HArgumentsObject* arguments_object =
+ new(zone()) HArgumentsObject(environment()->parameter_count(), zone());
for (int i = 0; i < environment()->parameter_count(); ++i) {
- HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
+ HInstruction* parameter = Add<HParameter>(i);
+ arguments_object->AddArgument(parameter, zone());
environment()->Bind(i, parameter);
}
+ AddInstruction(arguments_object);
+ graph()->SetArgumentsObject(arguments_object);
// First special is HContext.
- HInstruction* context = AddInstruction(new(zone()) HContext);
+ HInstruction* context = Add<HContext>();
environment()->BindContext(context);
// Initialize specials and locals to undefined.
@@ -5027,7 +4424,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HControlInstruction* compare;
if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
- if (!clause->IsSmiCompare()) {
+ if (!clause->compare_type()->Is(Type::Smi())) {
AddSoftDeoptimize();
}
@@ -5036,7 +4433,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
label_value,
Token::EQ_STRICT);
compare_->set_observed_input_representation(
- Representation::Integer32(), Representation::Integer32());
+ Representation::Smi(), Representation::Smi());
compare = compare_;
} else {
compare = new(zone()) HStringCompareAndBranch(context, tag_value,
@@ -5128,71 +4525,14 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
-bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
- return statement->OsrEntryId() == info()->osr_ast_id();
-}
-
-
-bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
- if (!HasOsrEntryAt(statement)) return false;
-
- HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
- HBasicBlock* osr_entry = graph()->CreateBasicBlock();
- HValue* true_value = graph()->GetConstantTrue();
- HBranch* test = new(zone()) HBranch(true_value, non_osr_entry, osr_entry);
- current_block()->Finish(test);
-
- HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
- non_osr_entry->Goto(loop_predecessor);
-
- set_current_block(osr_entry);
- osr_entry->set_osr_entry();
- BailoutId osr_entry_id = statement->OsrEntryId();
- int first_expression_index = environment()->first_expression_index();
- int length = environment()->length();
- ZoneList<HUnknownOSRValue*>* osr_values =
- new(zone()) ZoneList<HUnknownOSRValue*>(length, zone());
-
- for (int i = 0; i < first_expression_index; ++i) {
- HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
- AddInstruction(osr_value);
- environment()->Bind(i, osr_value);
- osr_values->Add(osr_value, zone());
- }
-
- if (first_expression_index != length) {
- environment()->Drop(length - first_expression_index);
- for (int i = first_expression_index; i < length; ++i) {
- HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
- AddInstruction(osr_value);
- environment()->Push(osr_value);
- osr_values->Add(osr_value, zone());
- }
- }
-
- graph()->set_osr_values(osr_values);
-
- AddSimulate(osr_entry_id);
- AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
- HContext* context = new(zone()) HContext;
- AddInstruction(context);
- environment()->BindContext(context);
- current_block()->Goto(loop_predecessor);
- loop_predecessor->SetJoinId(statement->EntryId());
- set_current_block(loop_predecessor);
- return true;
-}
-
-
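The removed PreProcessOsrEntry (its duties move to osr_->BuildPossibleOsrLoopEntry(stmt) in the hunks below) binds every environment slot on the OSR path to a placeholder HUnknownOSRValue, to be filled from the unoptimized frame when on-stack replacement actually happens. A minimal sketch of that bookkeeping, with OsrValue as an illustrative stand-in:

#include <cstdio>
#include <vector>

struct OsrValue {
  int slot;  // the environment slot this placeholder stands for
};

int main() {
  const int environment_length = 5;  // locals + expression stack (made up)
  std::vector<OsrValue> osr_values;
  for (int i = 0; i < environment_length; ++i) {
    // Equivalent of environment()->Bind(i, osr_value) above: every slot
    // gets a placeholder recorded for later patching.
    osr_values.push_back(OsrValue{i});
  }
  std::printf("bound %zu OSR placeholders\n", osr_values.size());
  return 0;
}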
void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
HBasicBlock* loop_entry,
BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
AddSimulate(stmt->StackCheckId());
HValue* context = environment()->LookupContext();
- HStackCheck* stack_check =
- new(zone()) HStackCheck(context, HStackCheck::kBackwardsBranch);
- AddInstruction(stack_check);
+ HStackCheck* stack_check = Add<HStackCheck>(
+ context, HStackCheck::kBackwardsBranch);
ASSERT(loop_entry->IsLoopHeader());
loop_entry->loop_information()->set_stack_check(stack_check);
CHECK_BAILOUT(Visit(stmt->body()));
@@ -5204,11 +4544,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+ HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -5247,12 +4583,7 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
+ HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@@ -5294,11 +4625,7 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+ HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@@ -5361,38 +4688,28 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
CHECK_ALIVE(VisitForValue(stmt->enumerable()));
HValue* enumerable = Top(); // Leave enumerable at the top.
- HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
- environment()->LookupContext(), enumerable));
+ HInstruction* map = Add<HForInPrepareMap>(
+ environment()->LookupContext(), enumerable);
AddSimulate(stmt->PrepareId());
- HInstruction* array = AddInstruction(
- new(zone()) HForInCacheArray(
- enumerable,
- map,
- DescriptorArray::kEnumCacheBridgeCacheIndex));
+ HInstruction* array = Add<HForInCacheArray>(
+ enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
- HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
+ HInstruction* enum_length = Add<HMapEnumLength>(map);
- HInstruction* start_index = AddInstruction(new(zone()) HConstant(0));
+ HInstruction* start_index = Add<HConstant>(0);
Push(map);
Push(array);
Push(enum_length);
Push(start_index);
- HInstruction* index_cache = AddInstruction(
- new(zone()) HForInCacheArray(
- enumerable,
- map,
- DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
+ HInstruction* index_cache = Add<HForInCacheArray>(
+ enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+ HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
@@ -5401,7 +4718,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HCompareIDAndBranch* compare_index =
new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
compare_index->set_observed_input_representation(
- Representation::Integer32(), Representation::Integer32());
+ Representation::Smi(), Representation::Smi());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
@@ -5415,18 +4732,16 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(loop_body);
- HValue* key = AddInstruction(
- new(zone()) HLoadKeyed(
- environment()->ExpressionStackAt(2), // Enum cache.
- environment()->ExpressionStackAt(0), // Iteration index.
- environment()->ExpressionStackAt(0),
- FAST_ELEMENTS));
+ HValue* key = Add<HLoadKeyed>(
+ environment()->ExpressionStackAt(2), // Enum cache.
+ environment()->ExpressionStackAt(0), // Iteration index.
+ environment()->ExpressionStackAt(0),
+ FAST_ELEMENTS);
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
- AddInstruction(new(zone()) HCheckMapValue(
- environment()->ExpressionStackAt(4),
- environment()->ExpressionStackAt(3)));
+ Add<HCheckMapValue>(environment()->ExpressionStackAt(4),
+ environment()->ExpressionStackAt(3));
Bind(each_var, key);
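The for-in lowering above turns the loop into a plain index walk over the cached key array: test index < enum_length, load the key with a keyed load, bind it, advance. A plain sketch of the resulting loop shape, using standard containers as hypothetical stand-ins for the enum cache:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Hypothetical enum cache holding the enumerable's keys.
  std::vector<std::string> enum_cache = {"a", "b", "c"};
  const size_t enum_length = enum_cache.size();

  for (size_t index = 0; index < enum_length; ++index) {  // index < limit
    const std::string& key = enum_cache[index];  // the HLoadKeyed equivalent
    std::printf("key: %s\n", key.c_str());       // loop body sees the key
  }
  return 0;
}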
@@ -5516,9 +4831,9 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+ SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+ shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
@@ -5579,10 +4894,10 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
Variable* var, LookupResult* lookup, bool is_store) {
- if (var->is_this() || !info()->has_global_object()) {
+ if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
(is_store && lookup->IsReadOnly()) ||
@@ -5597,11 +4912,9 @@ HOptimizedGraphBuilder::GlobalPropertyAccess
HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->LookupContext();
- int length = info()->scope()->ContextChainLength(var->scope());
+ int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- HInstruction* context_instruction = new(zone()) HOuterContext(context);
- AddInstruction(context_instruction);
- context = context_instruction;
+ context = Add<HOuterContext>(context);
}
return context;
}
@@ -5623,8 +4936,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Handle<Object> constant_value =
isolate()->factory()->GlobalConstantFor(variable->name());
if (!constant_value.is_null()) {
- HConstant* instr =
- new(zone()) HConstant(constant_value, Representation::Tagged());
+ HConstant* instr = new(zone()) HConstant(constant_value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5633,13 +4945,13 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
LookupGlobalProperty(variable, &lookup, false);
if (type == kUseCell &&
- info()->global_object()->IsAccessCheckNeeded()) {
+ current_info()->global_object()->IsAccessCheckNeeded()) {
type = kUseGeneric;
}
if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+ Handle<GlobalObject> global(current_info()->global_object());
+ Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
HLoadGlobalCell* instr =
new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
return ast_context()->ReturnInstruction(instr, expr->id());
@@ -5684,8 +4996,7 @@ void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::None());
+ HConstant* instr = new(zone()) HConstant(expr->value());
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5909,23 +5220,18 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
flags |= expr->has_function()
? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags;
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(closure_literals, Representation::Tagged()))));
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(literal_index, Representation::Tagged()))));
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(constant_properties, Representation::Tagged()))));
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(flags, Representation::Tagged()))));
+ Add<HPushArgument>(Add<HConstant>(closure_literals));
+ Add<HPushArgument>(Add<HConstant>(literal_index));
+ Add<HPushArgument>(Add<HConstant>(constant_properties));
+ Add<HPushArgument>(Add<HConstant>(flags));
Runtime::FunctionId function_id =
(expr->depth() > 1 || expr->may_store_doubles())
? Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow;
- literal = AddInstruction(
- new(zone()) HCallRuntime(context,
- isolate()->factory()->empty_string(),
- Runtime::FunctionForId(function_id),
- 4));
+ literal = Add<HCallRuntime>(context,
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id),
+ 4);
}
// The object is expected in the bailout environment during computation
@@ -5946,7 +5252,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
@@ -5991,8 +5297,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// of the object. This makes sure that the original object won't
// be used by other optimized code before it is transformed
// (e.g. because of code motion).
- HToFastProperties* result = new(zone()) HToFastProperties(Pop());
- AddInstruction(result);
+ HToFastProperties* result = Add<HToFastProperties>(Pop());
return ast_context()->ReturnValue(result);
} else {
return ast_context()->ReturnValue(Pop());
@@ -6013,7 +5318,9 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<Object> raw_boilerplate(literals->get(expr->literal_index()),
isolate());
+ bool uninitialized = false;
if (raw_boilerplate->IsUndefined()) {
+ uninitialized = true;
raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements());
if (raw_boilerplate.is_null()) {
@@ -6064,45 +5371,28 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
int literal_index = expr->literal_index();
-    // TODO(mstarzinger): The following check and deopt are actually obsolete,
-    // but test cases for the tick processor fail because the profile differs.
-
- // Deopt if the array literal boilerplate ElementsKind is of a type
- // different than the expected one. The check isn't necessary if the
- // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- IfBuilder builder(this);
- HValue* boilerplate = AddInstruction(new(zone())
- HConstant(original_boilerplate_object, Representation::Tagged()));
- HValue* elements_kind = AddInstruction(new(zone())
- HElementsKind(boilerplate));
- HValue* expected_kind = AddInstruction(new(zone())
- HConstant(boilerplate_elements_kind, Representation::Integer32()));
- builder.IfCompare(elements_kind, expected_kind, Token::EQ);
- builder.Then();
- builder.ElseDeopt();
- }
-
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(literals, Representation::Tagged()))));
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(literal_index, Representation::Tagged()))));
- AddInstruction(new(zone()) HPushArgument(AddInstruction(
- new(zone()) HConstant(constants, Representation::Tagged()))));
+ Add<HPushArgument>(Add<HConstant>(literals));
+ Add<HPushArgument>(Add<HConstant>(literal_index));
+ Add<HPushArgument>(Add<HConstant>(constants));
Runtime::FunctionId function_id = (expr->depth() > 1)
? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
- literal = AddInstruction(
- new(zone()) HCallRuntime(context,
- isolate()->factory()->empty_string(),
- Runtime::FunctionForId(function_id),
- 3));
+ literal = Add<HCallRuntime>(context,
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id),
+ 3);
+
+    // De-opt if the elements kind changed from boilerplate_elements_kind.
+ Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
+ isolate());
+ AddInstruction(HCheckMaps::New(literal, map, zone()));
}
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
+ // The literal index is on the stack, too.
+ Push(Add<HConstant>(expr->literal_index()));
HInstruction* elements = NULL;
@@ -6118,7 +5408,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
elements = AddLoadElements(literal);
- HValue* key = AddInstruction(new(zone()) HConstant(i));
+ HValue* key = Add<HConstant>(i);
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
@@ -6126,13 +5416,12 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyed(
- elements,
- key,
- value,
- boilerplate_elements_kind));
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value,
+ boilerplate_elements_kind);
+ instr->SetUninitialized(uninitialized);
break;
+ }
default:
UNREACHABLE();
break;
@@ -6140,6 +5429,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
AddSimulate(expr->GetIdForElement(i));
}
+
+ Drop(1); // array literal index
return ast_context()->ReturnValue(Pop());
}
@@ -6183,14 +5474,14 @@ static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
- BuildCheckNonSmi(object);
+ BuildCheckHeapObject(object);
AddInstruction(HCheckMaps::New(object, map, zone()));
}
void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map) {
- BuildCheckNonSmi(object);
+ BuildCheckHeapObject(object);
AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
@@ -6226,10 +5517,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
ASSERT(proto->GetPrototype(isolate())->IsNull());
}
ASSERT(proto->IsJSObject());
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto)),
- zone()));
+ Add<HCheckPrototypeMaps>(Handle<JSObject>(JSObject::cast(map->prototype())),
+ Handle<JSObject>(JSObject::cast(proto)),
+ zone(), top_info());
}
HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
@@ -6241,11 +5531,10 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
- HInstruction* heap_number_size = AddInstruction(new(zone()) HConstant(
- HeapNumber::kSize, Representation::Integer32()));
- HInstruction* double_box = AddInstruction(new(zone()) HAllocate(
+ HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+ HInstruction* double_box = Add<HAllocate>(
environment()->LookupContext(), heap_number_size,
- HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+ HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE);
AddStoreMapConstant(double_box, isolate()->factory()->heap_number_map());
AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
value, Representation::Double());
@@ -6265,7 +5554,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
- instr->set_transition(transition);
+ instr->SetTransition(transition, top_info());
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
instr->SetGVNFlag(kChangesMaps);
@@ -6295,8 +5584,8 @@ HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
Handle<JSFunction> setter,
Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
- AddInstruction(new(zone()) HPushArgument(object));
- AddInstruction(new(zone()) HPushArgument(value));
+ Add<HPushArgument>(object);
+ Add<HPushArgument>(value);
return new(zone()) HCallConstantFunction(setter, 2);
}
@@ -6318,51 +5607,92 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
}
-bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
+HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- if (!name->Equals(isolate()->heap()->length_string())) return false;
+ // Use monomorphic load if property lookup results in the same field index
+ // for all maps. Requires special map check on the set of all handled maps.
+ if (types->length() > kMaxLoadPolymorphism) return NULL;
- for (int i = 0; i < types->length(); i++) {
- if (types->at(i)->instance_type() != JS_ARRAY_TYPE) return false;
- }
+ LookupResult lookup(isolate());
+ int count;
+ Representation representation = Representation::None();
+ HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
+ for (count = 0; count < types->length(); ++count) {
+ Handle<Map> map = types->at(count);
+ if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+
+ HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
+ Representation new_representation =
+ ComputeLoadStoreRepresentation(map, &lookup);
- BuildCheckNonSmi(object);
+ if (count == 0) {
+ // First time through the loop; set access and representation.
+ access = new_access;
+ } else if (!representation.IsCompatibleForLoad(new_representation)) {
+ // Representations did not match.
+ break;
+ } else if (access.offset() != new_access.offset()) {
+ // Offsets did not match.
+ break;
+ } else if (access.IsInobject() != new_access.IsInobject()) {
+ // In-objectness did not match.
+ break;
+ }
+ representation = representation.generalize(new_representation);
+ }
- HInstruction* typecheck =
- AddInstruction(HCheckMaps::New(object, types, zone()));
- HInstruction* instr = new(zone())
- HLoadNamedField(object, HObjectAccess::ForArrayLength(), typecheck);
+ if (count != types->length()) return NULL;
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
- return true;
+ // Everything matched; can use monomorphic load.
+ BuildCheckHeapObject(object);
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ return BuildLoadNamedField(object, access, representation);
}
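TryLoadPolymorphicAsMonomorphic above succeeds only when every receiver map yields the same field offset, the same in-objectness, and mutually compatible representations; otherwise the builder falls back to a genuinely polymorphic load. A reduced sketch of that agreement check, with FieldInfo as an illustrative stand-in for HObjectAccess:

#include <cstdio>
#include <vector>

struct FieldInfo {
  int offset;      // field offset implied by one receiver map
  bool in_object;  // whether the field lives in-object or out-of-line
};

// All maps must agree before the polymorphic access can be emitted as a
// single map-checked monomorphic load.
bool CanUseMonomorphicLoad(const std::vector<FieldInfo>& fields) {
  for (size_t i = 1; i < fields.size(); ++i) {
    if (fields[i].offset != fields[0].offset) return false;
    if (fields[i].in_object != fields[0].in_object) return false;
  }
  return !fields.empty();
}

int main() {
  std::vector<FieldInfo> same = {{8, true}, {8, true}};
  std::vector<FieldInfo> mixed = {{8, true}, {12, true}};
  std::printf("%d %d\n", CanUseMonomorphicLoad(same),
              CanUseMonomorphicLoad(mixed));  // prints: 1 0
  return 0;
}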
-void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+ Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name) {
+ HInstruction* instr = TryLoadPolymorphicAsMonomorphic(
+ expr, object, types, name);
+ if (instr == NULL) {
+ // Something did not match; must use a polymorphic load.
+ BuildCheckHeapObject(object);
+ HValue* context = environment()->LookupContext();
+ instr = new(zone()) HLoadNamedFieldPolymorphic(
+ context, object, types, name, zone());
+ }
- if (HandlePolymorphicArrayLengthLoad(expr, object, types, name))
- return;
+ instr->set_position(expr->position());
+ return ast_context()->ReturnInstruction(instr, expr->id());
+}
- BuildCheckNonSmi(object);
- // Use monomorphic load if property lookup results in the same field index
+bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
+ int position,
+ BailoutId assignment_id,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name) {
+ // Use monomorphic store if property lookup results in the same field index
// for all maps. Requires special map check on the set of all handled maps.
- HInstruction* instr = NULL;
+ if (types->length() > kMaxStorePolymorphism) return false;
+
+ // TODO(verwaest): Merge the checking logic with the code in
+ // TryLoadPolymorphicAsMonomorphic.
LookupResult lookup(isolate());
int count;
Representation representation = Representation::None();
HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
- for (count = 0;
- count < types->length() && count < kMaxLoadPolymorphism;
- ++count) {
+ for (count = 0; count < types->length(); ++count) {
Handle<Map> map = types->at(count);
+ // Pass false to ignore transitions.
if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
@@ -6373,7 +5703,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
// First time through the loop; set access and representation.
access = new_access;
representation = new_representation;
- } else if (!representation.IsCompatibleForLoad(new_representation)) {
+ } else if (!representation.IsCompatibleForStore(new_representation)) {
// Representations did not match.
break;
} else if (access.offset() != new_access.offset()) {
@@ -6385,28 +5715,38 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
}
}
- if (count == types->length()) {
- // Everything matched; can use monomorphic load.
- AddInstruction(HCheckMaps::New(object, types, zone()));
- instr = BuildLoadNamedField(object, access, representation);
- } else {
- // Something did not match; must use a polymorphic load.
- HValue* context = environment()->LookupContext();
- instr = new(zone()) HLoadNamedFieldPolymorphic(
- context, object, types, name, zone());
- }
+ if (count != types->length()) return false;
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ // Everything matched; can use monomorphic store.
+ BuildCheckHeapObject(object);
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ HInstruction* store;
+ CHECK_ALIVE_OR_RETURN(
+ store = BuildStoreNamedField(
+ object, name, value, types->at(count - 1), &lookup),
+ true);
+ Push(value);
+ store->set_position(position);
+ AddInstruction(store);
+ AddSimulate(assignment_id);
+ ast_context()->ReturnValue(Pop());
+ return true;
}
void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- Assignment* expr,
+ BailoutId id,
+ int position,
+ BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
Handle<String> name) {
+ if (TryStorePolymorphicAsMonomorphic(
+ position, assignment_id, object, value, types, name)) {
+ return;
+ }
+
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
@@ -6417,7 +5757,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
if (count == 0) {
- BuildCheckNonSmi(object);
+ BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
}
++count;
@@ -6429,9 +5769,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
set_current_block(if_true);
HInstruction* instr;
- CHECK_ALIVE(instr =
- BuildStoreNamedField(object, name, value, map, &lookup));
- instr->set_position(expr->position());
+ CHECK_ALIVE(
+ instr = BuildStoreNamedField(object, name, value, map, &lookup));
+ instr->set_position(position);
// Goto will add the HSimulate for the store.
AddInstruction(instr);
if (!ast_context()->IsEffect()) Push(value);
@@ -6448,7 +5788,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- instr->set_position(expr->position());
+ instr->set_position(position);
AddInstruction(instr);
if (join != NULL) {
@@ -6460,10 +5800,10 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ AddSimulate(id, REMOVABLE_SIMULATE);
} else {
Push(value);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ AddSimulate(id, REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -6472,9 +5812,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
}
ASSERT(join != NULL);
- join->SetJoinId(expr->id());
+ join->SetJoinId(id);
set_current_block(join);
- if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
@@ -6489,54 +5829,9 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
HValue* value = environment()->ExpressionStackAt(0);
HValue* object = environment()->ExpressionStackAt(1);
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
- HInstruction* instr = NULL;
- SmallMapList* types = expr->GetReceiverTypes();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> map;
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
- if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) {
- return;
- }
- Drop(2);
- AddInstruction(new(zone()) HPushArgument(object));
- AddInstruction(new(zone()) HPushArgument(value));
- instr = new(zone()) HCallConstantFunction(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- value,
- map));
- }
-
- } else if (types != NULL && types->length() > 1) {
- Drop(2);
- return HandlePolymorphicStoreNamedField(expr, object, value, types, name);
- } else {
- Drop(2);
- instr = BuildStoreNamedGeneric(object, name, value);
- }
-
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- return ast_context()->ReturnValue(Pop());
-
+ if (expr->IsUninitialized()) AddSoftDeoptimize();
+ return BuildStoreNamed(expr, expr->id(), expr->position(),
+ expr->AssignmentId(), prop, object, value);
} else {
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
@@ -6568,33 +5863,86 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr =
- new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
+ Handle<GlobalObject> global(current_info()->global_object());
+ Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+ HInstruction* instr = Add<HStoreGlobalCell>(value, cell,
+ lookup.GetPropertyDetails());
instr->set_position(position);
- AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
} else {
HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>(context);
HStoreGlobalGeneric* instr =
- new(zone()) HStoreGlobalGeneric(context,
- global_object,
- var->name(),
- value,
- function_strict_mode_flag());
+ Add<HStoreGlobalGeneric>(context, global_object, var->name(),
+ value, function_strict_mode_flag());
instr->set_position(position);
- AddInstruction(instr);
ASSERT(instr->HasObservableSideEffects());
AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
}
+void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
+ BailoutId id,
+ int position,
+ BailoutId assignment_id,
+ Property* prop,
+ HValue* object,
+ HValue* value) {
+ Literal* key = prop->key()->AsLiteral();
+ Handle<String> name = Handle<String>::cast(key->value());
+ ASSERT(!name.is_null());
+
+ HInstruction* instr = NULL;
+ SmallMapList* types = expr->GetReceiverTypes();
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> map;
+ if (monomorphic) {
+ map = types->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ AddCheckConstantFunction(holder, object, map);
+ if (FLAG_inline_accessors &&
+ TryInlineSetter(setter, id, assignment_id, value)) {
+ return;
+ }
+ Drop(2);
+ Add<HPushArgument>(object);
+ Add<HPushArgument>(value);
+ instr = new(zone()) HCallConstantFunction(setter, 2);
+ } else {
+ Drop(2);
+ CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
+ name,
+ value,
+ map));
+ }
+
+ } else if (types != NULL && types->length() > 1) {
+ Drop(2);
+ return HandlePolymorphicStoreNamedField(
+ id, position, assignment_id, object, value, types, name);
+ } else {
+ Drop(2);
+ instr = BuildStoreNamedGeneric(object, name, value);
+ }
+
+ Push(value);
+ instr->set_position(position);
+ AddInstruction(instr);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(assignment_id, REMOVABLE_SIMULATE);
+ }
+ return ast_context()->ReturnValue(Pop());
+}
+
+
void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
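
The Add<HStoreGlobalCell> and Add<HGlobalObject> lines above show the mechanical pattern that dominates this upgrade: a templated Add<Instr>(...) helper constructs an instruction and inserts it into the current block in one step, replacing the two-step "AddInstruction(new(zone()) HInstr(...))" idiom. A standalone mock of the assumed shape (illustrative only, not V8's implementation):

    #include <memory>
    #include <utility>
    #include <vector>

    struct HInstructionMock {
      virtual ~HInstructionMock() = default;
    };

    class GraphBuilderMock {
     public:
      // Constructs the instruction and appends it in one call.
      template <typename Instr, typename... Args>
      Instr* Add(Args&&... args) {
        auto owned = std::make_unique<Instr>(std::forward<Args>(args)...);
        Instr* instr = owned.get();
        instructions_.push_back(std::move(owned));  // plays AddInstruction()
        return instr;
      }

     private:
      std::vector<std::unique_ptr<HInstructionMock>> instructions_;
    };

    struct HGlobalObjectMock : HInstructionMock {
      explicit HGlobalObjectMock(int context) : context_(context) {}
      int context_;
    };

    int main() {
      GraphBuilderMock builder;
      HGlobalObjectMock* global = builder.Add<HGlobalObjectMock>(42);
      return global->context_ == 42 ? 0 : 1;
    }
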
@@ -6633,13 +5981,13 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will be allocated to context slots. We have no
// direct way to detect that the variable is a parameter so we do
// a linear search of the parameter variables.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
+ if (var == current_info()->scope()->parameter(i)) {
Bailout(
"assignment to parameter, function uses arguments object");
}
@@ -6663,9 +6011,8 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
- AddInstruction(instr);
+ HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+ mode, Top());
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
@@ -6685,10 +6032,11 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
Handle<Map> map;
- HInstruction* load;
+ HInstruction* load = NULL;
+ SmallMapList* types = prop->GetReceiverTypes();
bool monomorphic = prop->IsMonomorphic();
if (monomorphic) {
- map = prop->GetReceiverTypes()->first();
+ map = types->first();
// We can't generate code for a monomorphic dict mode load so
// just pretend it is not monomorphic.
if (map->is_dictionary_map()) monomorphic = false;
@@ -6701,9 +6049,10 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
} else {
load = BuildLoadNamedMonomorphic(object, name, prop, map);
}
- } else {
- load = BuildLoadNamedGeneric(object, name, prop);
+ } else if (types != NULL && types->length() > 1) {
+ load = TryLoadPolymorphicAsMonomorphic(prop, object, types, name);
}
+ if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
PushAndAdd(load);
if (load->HasObservableSideEffects()) {
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
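
The compound-assignment load now tries three strategies in order, with the generic IC as the unconditional fallback when TryLoadPolymorphicAsMonomorphic returns NULL. A compilable sketch of that selection chain (names are illustrative):

    #include <cstdio>

    const char* SelectNamedLoad(bool monomorphic, int receiver_type_count) {
      const char* load = nullptr;
      if (monomorphic) {
        load = "monomorphic load";
      } else if (receiver_type_count > 1) {
        // TryLoadPolymorphicAsMonomorphic may still produce nothing.
        load = nullptr;
      }
      if (load == nullptr) load = "generic load";  // unconditional fallback
      return load;
    }

    int main() { std::printf("%s\n", SelectNamedLoad(false, 3)); }
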
@@ -6719,31 +6068,8 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
AddSimulate(operation->id(), REMOVABLE_SIMULATE);
}
- HInstruction* store;
- if (!monomorphic || map->is_observed()) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
- } else {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- store = BuildCallSetter(object, instr, map, setter, holder);
- } else {
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
- name,
- instr,
- map));
- }
- }
- AddInstruction(store);
- // Drop the simulated receiver and value. Return the value.
- Drop(2);
- Push(instr);
- if (store->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- return ast_context()->ReturnValue(Pop());
-
+ return BuildStoreNamed(prop, expr->id(), expr->position(),
+ expr->AssignmentId(), prop, object, instr);
} else {
// Keyed property.
CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -6816,7 +6142,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
// We insert a use of the old value to detect unsupported uses of const
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
- AddInstruction(new(zone()) HUseConst(old_value));
+ Add<HUseConst>(old_value);
}
} else if (var->mode() == CONST_HARMONY) {
if (expr->op() != Token::INIT_CONST_HARMONY) {
@@ -6859,12 +6185,12 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct way
// to detect that the variable is a parameter.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
+ if (var == current_info()->scope()->parameter(i)) {
return Bailout("assignment to parameter in arguments object");
}
}
@@ -6897,9 +6223,8 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
}
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
- context, var->index(), mode, Top());
- AddInstruction(instr);
+ HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+ mode, Top());
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
@@ -6933,9 +6258,8 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
HValue* context = environment()->LookupContext();
HValue* value = environment()->Pop();
- HThrow* instr = new(zone()) HThrow(context, value);
+ HThrow* instr = Add<HThrow>(context, value);
instr->set_position(expr->position());
- AddInstruction(instr);
AddSimulate(expr->id());
current_block()->FinishExit(new(zone()) HAbnormalExit);
set_current_block(NULL);
@@ -6981,7 +6305,7 @@ HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
Handle<JSFunction> getter,
Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
- AddInstruction(new(zone()) HPushArgument(object));
+ Add<HPushArgument>(object);
return new(zone()) HCallConstantFunction(getter, 1);
}
@@ -7016,7 +6340,7 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
if (lookup.IsConstantFunction()) {
AddCheckMap(object, map);
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
- return new(zone()) HConstant(function, Representation::Tagged());
+ return new(zone()) HConstant(function);
}
// Handle a load from a known field somewhere in the prototype chain.
@@ -7026,10 +6350,8 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
- HValue* holder_value = AddInstruction(new(zone())
- HConstant(holder, Representation::Tagged()));
+ Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
+ HValue* holder_value = Add<HConstant>(holder);
return BuildLoadNamedField(holder_value,
HObjectAccess::ForField(holder_map, &lookup, name),
ComputeLoadStoreRepresentation(map, &lookup));
@@ -7041,9 +6363,9 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
- return new(zone()) HConstant(function, Representation::Tagged());
+ return new(zone()) HConstant(function);
}
// No luck, do a generic load.
@@ -7078,8 +6400,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, object_prototype, zone()));
+ Add<HCheckPrototypeMaps>(prototype, object_prototype, zone(), top_info());
load_mode = ALLOW_RETURN_HOLE;
graph()->MarkDependsOnEmptyArrayProtoElements();
}
@@ -7158,7 +6479,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
- BuildCheckNonSmi(object);
+ BuildCheckHeapObject(object);
SmallMapList* maps = prop->GetReceiverTypes();
bool todo_external_array = false;
@@ -7211,9 +6532,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
HValue* context = environment()->LookupContext();
- transition = new(zone()) HTransitionElementsKind(
- context, object, map, transition_target.at(i));
- AddInstruction(transition);
+ transition = Add<HTransitionElementsKind>(context, object, map,
+ transition_target.at(i));
} else {
type_todo[map->elements_kind()] = true;
if (IsExternalArrayElementsKind(map->elements_kind())) {
@@ -7246,8 +6566,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
- HInstruction* elements_kind_instr =
- AddInstruction(new(zone()) HElementsKind(object));
+ HInstruction* elements_kind_instr = Add<HElementsKind>(object);
HInstruction* elements = AddLoadElements(object, checkspec);
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
@@ -7268,11 +6587,9 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
&& todo_external_array) {
- HInstruction* length =
- AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddBoundsCheck(key, length);
- external_elements = new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
+ HInstruction* length = AddLoadFixedArrayLength(elements);
+ checked_key = Add<HBoundsCheck>(key, length);
+ external_elements = Add<HLoadExternalArrayPointer>(elements);
}
if (type_todo[elements_kind]) {
HBasicBlock* if_true = graph()->CreateBasicBlock();
@@ -7313,7 +6630,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
typecheck, Representation::Smi());
length->set_type(HType::Smi());
- checked_key = AddBoundsCheck(key, length);
+ checked_key = Add<HBoundsCheck>(key, length);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
@@ -7330,8 +6647,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
if_jsarray->GotoNoSimulate(join);
set_current_block(if_fastobject);
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddBoundsCheck(key, length);
+ length = AddLoadFixedArrayLength(elements);
+ checked_key = Add<HBoundsCheck>(key, length);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
@@ -7383,7 +6700,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
: BuildLoadKeyedGeneric(obj, key);
AddInstruction(instr);
} else {
- BuildCheckNonSmi(obj);
+ BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
@@ -7430,7 +6747,8 @@ void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
HEnterInlined* entry = function_state()->entry();
entry->set_arguments_pushed();
- ZoneList<HValue*>* arguments_values = entry->arguments_values();
+ HArgumentsObject* arguments = entry->arguments_object();
+ const ZoneList<HValue*>* arguments_values = arguments->arguments_values();
HInstruction* insert_after = entry;
for (int i = 0; i < arguments_values->length(); i++) {
@@ -7462,8 +6780,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
+ HInstruction* elements = Add<HArgumentsElements>(false);
result = new(zone()) HArgumentsLength(elements);
} else {
// Number of arguments without receiver.
@@ -7478,11 +6795,9 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HValue* key = Pop();
Drop(1); // Arguments object.
if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length = AddInstruction(
- new(zone()) HArgumentsLength(elements));
- HInstruction* checked_key = AddBoundsCheck(key, length);
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* length = Add<HArgumentsLength>(elements);
+ HInstruction* checked_key = Add<HBoundsCheck>(key, length);
result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
} else {
EnsureArgumentsArePushedForAccess();
@@ -7491,9 +6806,8 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* elements = function_state()->arguments_elements();
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
- HInstruction* length = AddInstruction(new(zone()) HConstant(
- argument_count));
- HInstruction* checked_key = AddBoundsCheck(key, length);
+ HInstruction* length = Add<HConstant>(argument_count);
+ HInstruction* checked_key = Add<HBoundsCheck>(key, length);
result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
}
}
@@ -7514,7 +6828,7 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
HInstruction* instr = NULL;
if (expr->IsStringLength()) {
HValue* string = Pop();
- BuildCheckNonSmi(string);
+ BuildCheckHeapObject(string);
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
instr = HStringLength::New(zone(), string);
} else if (expr->IsStringAccess()) {
@@ -7529,7 +6843,7 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
- BuildCheckNonSmi(function);
+ BuildCheckHeapObject(function);
instr = new(zone()) HLoadFunctionPrototype(function);
} else if (expr->key()->IsPropertyName()) {
@@ -7552,7 +6866,7 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
if (LookupGetter(map, name, &getter, &holder)) {
AddCheckConstantFunction(holder, Top(), map);
if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
- AddInstruction(new(zone()) HPushArgument(Pop()));
+ Add<HPushArgument>(Pop());
instr = new(zone()) HCallConstantFunction(getter, 1);
} else {
instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
@@ -7594,8 +6908,7 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map) {
if (!holder.is_null()) {
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
}
}
@@ -7703,7 +7016,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
empty_smi_block->Goto(number_block);
set_current_block(not_smi_block);
} else {
- BuildCheckNonSmi(receiver);
+ BuildCheckHeapObject(receiver);
}
}
HBasicBlock* if_true = graph()->CreateBasicBlock();
@@ -7740,7 +7053,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
expr->ComputeTarget(map, name);
AddCheckPrototypeMaps(expr->holder(), map);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
@@ -7824,7 +7137,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
// Do a quick check on source code length to avoid parsing large
@@ -7860,16 +7173,16 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
}
-#if !defined(V8_TARGET_ARCH_IA32)
+#if !V8_TARGET_ARCH_IA32
// Target must be able to use caller's context.
- CompilationInfo* outer_info = info();
+ CompilationInfo* outer_info = current_info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
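
The guard changes from "#if !defined(V8_TARGET_ARCH_IA32)" to "#if !V8_TARGET_ARCH_IA32". This relies on two preprocessor facts: an undefined identifier evaluates to 0 inside #if, and the build must define the arch macro to 1 (not merely define it) when targeting IA32 so that the positive form "#if V8_TARGET_ARCH_IA32" below also works. A self-contained sketch of the convention:

    // V8_TARGET_ARCH_IA32 is normally set (to 1) by the build system;
    // when it is undefined, #if treats it as 0 and the negation is true.
    #if !V8_TARGET_ARCH_IA32
    // Non-IA32 path: the target must be able to use the caller's context.
    #endif

    int main() { return 0; }
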
@@ -7998,7 +7311,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// Type-check the inlined function.
ASSERT(target_shared->has_deoptimization_support());
- AstTyper::Type(&target_info);
+ AstTyper::Run(&target_info);
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
@@ -8016,53 +7329,39 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
undefined,
function_state()->inlining_kind(),
undefined_receiver);
-#ifdef V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
- HConstant* context =
- new(zone()) HConstant(Handle<Context>(target->context()),
- Representation::Tagged());
- AddInstruction(context);
+ HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
#endif
AddSimulate(return_id);
current_block()->UpdateEnvironment(inner_env);
- ZoneList<HValue*>* arguments_values = NULL;
+ HArgumentsObject* arguments_object = NULL;
- // If the function uses arguments copy current arguments values
- // to use them for materialization.
+ // If the function uses an arguments object, create and bind one; also
+ // copy the current arguments values to use them for materialization.
if (function->scope()->arguments() != NULL) {
+ ASSERT(function->scope()->arguments()->IsStackAllocated());
HEnvironment* arguments_env = inner_env->arguments_environment();
int arguments_count = arguments_env->parameter_count();
- arguments_values = new(zone()) ZoneList<HValue*>(arguments_count, zone());
+ arguments_object = Add<HArgumentsObject>(arguments_count, zone());
+ inner_env->Bind(function->scope()->arguments(), arguments_object);
for (int i = 0; i < arguments_count; i++) {
- arguments_values->Add(arguments_env->Lookup(i), zone());
+ arguments_object->AddArgument(arguments_env->Lookup(i), zone());
}
}
HEnterInlined* enter_inlined =
- new(zone()) HEnterInlined(target,
- arguments_count,
- function,
- function_state()->inlining_kind(),
- function->scope()->arguments(),
- arguments_values,
- undefined_receiver,
- zone());
+ Add<HEnterInlined>(target, arguments_count, function,
+ function_state()->inlining_kind(),
+ function->scope()->arguments(),
+ arguments_object, undefined_receiver, zone());
function_state()->set_entry(enter_inlined);
- AddInstruction(enter_inlined);
-
- // If the function uses arguments object create and bind one.
- if (function->scope()->arguments() != NULL) {
- ASSERT(function->scope()->arguments()->IsStackAllocated());
- inner_env->Bind(function->scope()->arguments(),
- graph()->GetArgumentsObject());
- }
-
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
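
This hunk moves the deopt-materialization data for inlined functions: instead of stashing a bare ZoneList of values on HEnterInlined, the builder now creates an HArgumentsObject, binds it in the inlined environment, and appends each argument value to it. A mock of the assumed shape:

    #include <vector>

    struct HValueMock {};

    class HArgumentsObjectMock {
     public:
      void AddArgument(HValueMock* value) { values_.push_back(value); }
      const std::vector<HValueMock*>& arguments_values() const {
        return values_;
      }
      int arguments_count() const {
        return static_cast<int>(values_.size());
      }

     private:
      std::vector<HValueMock*> values_;  // used to materialize at deopt
    };

    int main() {
      HArgumentsObjectMock args;
      HValueMock receiver, x;
      args.AddArgument(&receiver);
      args.AddArgument(&x);
      return args.arguments_count() == 2 ? 0 : 1;
    }
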
@@ -8209,14 +7508,14 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
+ BailoutId id,
+ BailoutId assignment_id,
HValue* implicit_return_value) {
return TryInline(CALL_AS_METHOD,
setter,
1,
implicit_return_value,
- assignment->id(),
- assignment->AssignmentId(),
+ id, assignment_id,
SETTER_CALL_RETURN);
}
@@ -8300,11 +7599,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* string = Pop();
HValue* context = environment()->LookupContext();
ASSERT(!expr->holder().is_null());
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Call::GetPrototypeForPrimitiveCheck(STRING_CHECK,
- expr->holder()->GetIsolate()),
- expr->holder(),
- zone()));
+ Add<HCheckPrototypeMaps>(Call::GetPrototypeForPrimitiveCheck(
+ STRING_CHECK, expr->holder()->GetIsolate()),
+ expr->holder(), zone(), top_info());
HInstruction* char_code =
BuildStringCharCodeAt(context, string, index);
if (id == kStringCharCodeAt) {
@@ -8368,16 +7665,14 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
result =
HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
} else if (exponent == -0.5) {
- HConstant* double_one = new(zone()) HConstant(
- 1, Representation::Double());
- AddInstruction(double_one);
+ HValue* one = graph()->GetConstant1();
HInstruction* sqrt =
HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
AddInstruction(sqrt);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
ASSERT(!sqrt->HasObservableSideEffects());
- result = HDiv::New(zone(), context, double_one, sqrt);
+ result = HDiv::New(zone(), context, one, sqrt);
} else if (exponent == 2.0) {
result = HMul::New(zone(), context, left, left);
}
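
The Math.pow inlining keeps its strength reductions but now reuses the graph's cached constant 1 (graph()->GetConstant1()) for the -0.5 case instead of allocating a fresh HConstant. The reductions themselves, as a worked sketch (V8's kMathPowHalf additionally handles the -0 and -Infinity edge cases that a plain sqrt does not reproduce for Math.pow semantics):

    #include <cmath>
    #include <cstdio>

    double pow_half(double x)       { return std::sqrt(x); }        // x ** 0.5
    double pow_minus_half(double x) { return 1.0 / std::sqrt(x); }  // x ** -0.5
    double pow_two(double x)        { return x * x; }               // x ** 2

    int main() {
      std::printf("%g %g %g\n",
                  pow_half(4.0), pow_minus_half(4.0), pow_two(4.0));
    }
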
@@ -8397,8 +7692,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
Drop(1); // Receiver.
HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>(context);
HRandom* result = new(zone()) HRandom(global_object);
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -8455,7 +7749,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
return false;
}
- if (info()->scope()->arguments() == NULL) return false;
+ if (current_info()->scope()->arguments() == NULL) return false;
ZoneList<Expression*>* args = expr->arguments();
if (args->length() != 2) return false;
@@ -8477,12 +7771,9 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HValue* receiver = Pop();
if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length =
- AddInstruction(new(zone()) HArgumentsLength(elements));
- HValue* wrapped_receiver =
- AddInstruction(new(zone()) HWrapReceiver(receiver, function));
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* length = Add<HArgumentsLength>(elements);
+ HValue* wrapped_receiver = Add<HWrapReceiver>(receiver, function);
HInstruction* result =
new(zone()) HApplyArguments(function,
wrapped_receiver,
@@ -8494,13 +7785,10 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
} else {
// We are inside inlined function and we know exactly what is inside
// arguments object. But we need to be able to materialize at deopt.
- // TODO(mstarzinger): For now we just ensure arguments are pushed
- // right after HEnterInlined, but we could be smarter about this.
- EnsureArgumentsArePushedForAccess();
ASSERT_EQ(environment()->arguments_environment()->parameter_count(),
- function_state()->entry()->arguments_values()->length());
- HEnterInlined* entry = function_state()->entry();
- ZoneList<HValue*>* arguments_values = entry->arguments_values();
+ function_state()->entry()->arguments_object()->arguments_count());
+ HArgumentsObject* args = function_state()->entry()->arguments_object();
+ const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
PushAndAdd(new(zone()) HWrapReceiver(receiver, function));
for (int i = 1; i < arguments_count; i++) {
@@ -8696,8 +7984,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<GlobalObject> global(info()->global_object());
+ !current_info()->global_object()->IsAccessCheckNeeded()) {
+ Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
if (known_global_function) {
@@ -8710,14 +7998,12 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Pop();
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+ Add<HCheckFunction>(function, expr->target());
// Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver =
- new(zone()) HGlobalReceiver(global_object);
+ HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
// Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
- AddInstruction(global_receiver);
ASSERT(environment()->ExpressionStackAt(receiver_index)->
IsGlobalObject());
environment()->SetExpressionStackAt(receiver_index, global_receiver);
@@ -8732,16 +8018,23 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
if (TryInlineCall(expr)) return;
- if (expr->target().is_identical_to(info()->closure())) {
+ if (expr->target().is_identical_to(current_info()->closure())) {
graph()->MarkRecursive();
}
- call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
+ if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
+ // When the target has a custom call IC generator, use the IC,
+ // because it is likely to generate better code.
+ HValue* context = environment()->LookupContext();
+ call = PreProcessCall(
+ new(zone()) HCallNamed(context, var->name(), argument_count));
+ } else {
+ call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
argument_count));
+ }
} else {
HValue* context = environment()->LookupContext();
- HGlobalObject* receiver = new(zone()) HGlobalObject(context);
- AddInstruction(receiver);
+ HGlobalObject* receiver = Add<HGlobalObject>(context);
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
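
Calls to known global functions now check for a hand-written call stub first and route through the named call IC when one exists, on the expectation that the custom stub beats a direct known-global call. A trivial sketch of the decision:

    const char* SelectGlobalCall(bool has_custom_call_generator) {
      return has_custom_call_generator
                 ? "HCallNamed"        // the custom IC likely emits better code
                 : "HCallKnownGlobal"; // direct call to the known target
    }

    int main() { return SelectGlobalCall(true) != nullptr ? 0 : 1; }
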
@@ -8755,12 +8048,11 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HValue* context = environment()->LookupContext();
- HGlobalObject* global = new(zone()) HGlobalObject(context);
- AddInstruction(global);
+ HGlobalObject* global = Add<HGlobalObject>(context);
HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+ Add<HCheckFunction>(function, expr->target());
if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
if (FLAG_trace_inlining) {
@@ -8786,10 +8078,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
- AddInstruction(receiver);
+ HGlobalObject* global_object = Add<HGlobalObject>(context);
+ HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
@@ -8807,8 +8097,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
- constructor->initial_map()->InitialPropertiesLength() == 0;
+ constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
}
@@ -8818,7 +8107,6 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(current_block()->HasPredecessor());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* context = environment()->LookupContext();
- Factory* factory = isolate()->factory();
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
@@ -8829,8 +8117,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
- HValue* check = AddInstruction(
- new(zone()) HCheckFunction(function, constructor));
+ HValue* check = Add<HCheckFunction>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
@@ -8838,84 +8125,19 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
constructor->shared()->CompleteInobjectSlackTracking();
}
- // Calculate instance size from initial map of constructor.
- ASSERT(constructor->has_initial_map());
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->InitialPropertiesLength() == 0);
-
- // Allocate an instance of the implicit receiver object.
- HValue* size_in_bytes =
- AddInstruction(new(zone()) HConstant(instance_size,
- Representation::Integer32()));
-
- HAllocate::Flags flags = HAllocate::DefaultFlags();
- if (FLAG_pretenuring_call_new &&
- isolate()->heap()->ShouldGloballyPretenure()) {
- flags = static_cast<HAllocate::Flags>(
- flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
- }
-
- HInstruction* receiver =
- AddInstruction(new(zone()) HAllocate(context,
- size_in_bytes,
- HType::JSObject(),
- flags));
- HAllocate::cast(receiver)->set_known_initial_map(initial_map);
-
- // Load the initial map from the constructor.
- HValue* constructor_value =
- AddInstruction(new(zone()) HConstant(constructor,
- Representation::Tagged()));
- HValue* initial_map_value =
- AddLoad(constructor_value, HObjectAccess::ForJSObjectOffset(
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- { NoObservableSideEffectsScope no_effects(this);
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- AddStore(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
- initial_map_value);
- HValue* empty_fixed_array =
- AddInstruction(new(zone()) HConstant(factory->empty_fixed_array(),
- Representation::Tagged()));
- AddStore(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
- empty_fixed_array);
- AddStore(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
- empty_fixed_array);
- if (initial_map->inobject_properties() != 0) {
- HConstant* undefined = graph()->GetConstantUndefined();
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- AddStore(receiver,
- HObjectAccess::ForJSObjectOffset(property_offset),
- undefined);
- }
- }
- }
-
- // Replace the constructor function with a newly allocated receiver using
- // the index of the receiver from the top of the expression stack.
+ // Replace the constructor function with a newly allocated receiver.
+ HInstruction* receiver = Add<HAllocateObject>(context, constructor);
+ // Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineConstruct(expr, receiver)) return;
- // TODO(mstarzinger): For now we remove the previous HAllocate and all
- // corresponding instructions and instead add HPushArgument for the
- // arguments in case inlining failed. What we actually should do is for
- // inlining to try to build a subgraph without mutating the parent graph.
- HInstruction* instr = current_block()->last();
- while (instr != initial_map_value) {
- HInstruction* prev_instr = instr->previous();
- instr->DeleteAndReplaceWith(NULL);
- instr = prev_instr;
- }
- initial_map_value->DeleteAndReplaceWith(NULL);
+ // TODO(mstarzinger): For now we remove the previous HAllocateObject and
+ // add HPushArgument for the arguments in case inlining failed. What we
+ // actually should do is emit HInvokeFunction on the constructor instead
+ // of using HCallNew as a fallback.
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
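
Because a single HAllocateObject now stands in for the old hand-built allocation sequence, rolling back a failed inlining attempt deletes exactly two speculative instructions instead of walking the block backwards to initial_map_value. A toy illustration:

    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> block = {"check_function", "allocate_object"};
      block.pop_back();  // receiver->DeleteAndReplaceWith(NULL)
      block.pop_back();  // check->DeleteAndReplaceWith(NULL)
      return static_cast<int>(block.size());  // 0: block is back to normal
    }
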
@@ -8926,18 +8148,17 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
- bool use_call_new_array = FLAG_optimize_constructed_arrays &&
- !(expr->target().is_null()) &&
- *(expr->target()) == isolate()->global_context()->array_function();
-
+ Handle<JSFunction> array_function(
+ isolate()->global_context()->array_function(), isolate());
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
- if (use_call_new_array) {
- Handle<JSGlobalPropertyCell> cell = expr->allocation_info_cell();
+ if (expr->target().is_identical_to(array_function)) {
+ Handle<Cell> cell = expr->allocation_info_cell();
+ Add<HCheckFunction>(constructor, array_function);
call = new(zone()) HCallNewArray(context, constructor, argument_count,
- cell);
+ cell, expr->elements_kind());
} else {
call = new(zone()) HCallNew(context, constructor, argument_count);
}
@@ -9074,11 +8295,10 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
HValue* context = environment()->LookupContext();
HInstruction* instr =
HMul::New(zone(), context, value, graph()->GetConstantMinus1());
- TypeInfo info = expr->type();
- Representation rep = ToRepresentation(info);
- if (info.IsUninitialized()) {
+ Handle<Type> operand_type = expr->expression()->lower_type();
+ Representation rep = ToRepresentation(operand_type);
+ if (operand_type->Is(Type::None())) {
AddSoftDeoptimize();
- info = TypeInfo::Unknown();
}
if (instr->IsBinaryOperation()) {
HBinaryOperation::cast(instr)->set_observed_input_representation(1, rep);
@@ -9091,8 +8311,8 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- TypeInfo info = expr->type();
- if (info.IsUninitialized()) {
+ Handle<Type> operand_type = expr->expression()->lower_type();
+ if (operand_type->Is(Type::None())) {
AddSoftDeoptimize();
}
HInstruction* instr = new(zone()) HBitNot(value);
@@ -9150,8 +8370,8 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
// The input to the count operation is on top of the expression stack.
TypeInfo info = expr->type();
Representation rep = ToRepresentation(info);
- if (rep.IsTagged()) {
- rep = Representation::Integer32();
+ if (rep.IsNone() || rep.IsTagged()) {
+ rep = Representation::Smi();
}
if (returns_original_input) {
@@ -9159,8 +8379,11 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
// as the return value.
- HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
- AddInstruction(number_input);
+ HInstruction* number_input = Add<HForceRepresentation>(Pop(), rep);
+ if (!rep.IsDouble()) {
+ number_input->SetFlag(HInstruction::kFlexibleRepresentation);
+ number_input->SetFlag(HInstruction::kCannotBeTagged);
+ }
Push(number_input);
}
@@ -9172,10 +8395,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
: graph()->GetConstantMinus1();
HValue* context = environment()->LookupContext();
HInstruction* instr = HAdd::New(zone(), context, Top(), delta);
- // We can't insert a simulate here, because it would break deoptimization,
- // so the HAdd must not have side effects, so we must freeze its
- // representation.
- instr->AssumeRepresentation(rep);
+ instr->SetFlag(HInstruction::kCannotBeTagged);
instr->ClearAllSideEffects();
AddInstruction(instr);
return instr;
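
Count operations (x++ and friends) now start from Smi when the feedback is None or Tagged, and both the forced input and the HAdd are flagged kCannotBeTagged so they widen through number representations or deoptimize rather than fall back to tagged values. A sketch of the starting-representation rule:

    enum class Rep { kNone, kSmi, kInteger32, kDouble, kTagged };

    // Inputs with no useful feedback begin life as Smi and may widen later.
    Rep InitialCountRep(Rep feedback) {
      if (feedback == Rep::kNone || feedback == Rep::kTagged) return Rep::kSmi;
      return feedback;  // Integer32 or Double feedback is kept as-is
    }

    int main() {
      return InitialCountRep(Rep::kTagged) == Rep::kSmi ? 0 : 1;
    }
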
@@ -9231,13 +8451,13 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct
// way to detect that the variable is a parameter so we use a
// linear search of the parameter list.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
+ if (var == current_info()->scope()->parameter(i)) {
return Bailout("assignment to parameter in arguments object");
}
}
@@ -9246,9 +8466,8 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), mode, after);
- AddInstruction(instr);
+ HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+ mode, after);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
@@ -9272,10 +8491,11 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
Handle<Map> map;
- HInstruction* load;
+ HInstruction* load = NULL;
bool monomorphic = prop->IsMonomorphic();
+ SmallMapList* types = prop->GetReceiverTypes();
if (monomorphic) {
- map = prop->GetReceiverTypes()->first();
+ map = types->first();
if (map->is_dictionary_map()) monomorphic = false;
}
if (monomorphic) {
@@ -9286,9 +8506,10 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
} else {
load = BuildLoadNamedMonomorphic(object, name, prop, map);
}
- } else {
- load = BuildLoadNamedGeneric(object, name, prop);
+ } else if (types != NULL && types->length() > 1) {
+ load = TryLoadPolymorphicAsMonomorphic(prop, object, types, name);
}
+ if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
PushAndAdd(load);
if (load->HasObservableSideEffects()) {
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
@@ -9376,16 +8597,16 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
int32_t i = c_index->NumberValueAsInteger32();
Handle<String> s = c_string->StringValue();
if (i < 0 || i >= s->length()) {
- return new(zone()) HConstant(OS::nan_value(), Representation::Double());
+ return new(zone()) HConstant(OS::nan_value());
}
return new(zone()) HConstant(s->Get(i));
}
}
- BuildCheckNonSmi(string);
+ BuildCheckHeapObject(string);
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
HInstruction* length = HStringLength::New(zone(), string);
AddInstruction(length);
- HInstruction* checked_index = AddBoundsCheck(index, length);
+ HInstruction* checked_index = Add<HBoundsCheck>(index, length);
return new(zone()) HStringCharCodeAt(context, string, checked_index);
}
@@ -9452,27 +8673,29 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
HValue* left,
HValue* right) {
HValue* context = environment()->LookupContext();
- TypeInfo left_info = expr->left_type();
- TypeInfo right_info = expr->right_type();
- TypeInfo result_info = expr->result_type();
- bool has_fixed_right_arg = expr->has_fixed_right_arg();
- int fixed_right_arg_value = expr->fixed_right_arg_value();
- Representation left_rep = ToRepresentation(left_info);
- Representation right_rep = ToRepresentation(right_info);
- Representation result_rep = ToRepresentation(result_info);
- if (left_info.IsUninitialized()) {
- // Can't have initialized one but not the other.
- ASSERT(right_info.IsUninitialized());
+ Handle<Type> left_type = expr->left()->lower_type();
+ Handle<Type> right_type = expr->right()->lower_type();
+ Handle<Type> result_type = expr->result_type();
+ Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+ Representation left_rep = ToRepresentation(left_type);
+ Representation right_rep = ToRepresentation(right_type);
+ Representation result_rep = ToRepresentation(result_type);
+ if (left_type->Is(Type::None())) {
AddSoftDeoptimize();
- left_info = right_info = TypeInfo::Unknown();
+ // TODO(rossberg): we should be able to get rid of non-continuous defaults.
+ left_type = handle(Type::Any(), isolate());
+ }
+ if (right_type->Is(Type::None())) {
+ AddSoftDeoptimize();
+ right_type = handle(Type::Any(), isolate());
}
HInstruction* instr = NULL;
switch (expr->op()) {
case Token::ADD:
- if (left_info.IsString() && right_info.IsString()) {
- BuildCheckNonSmi(left);
+ if (left_type->Is(Type::String()) && right_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- BuildCheckNonSmi(right);
+ BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
instr = HStringAdd::New(zone(), context, left, right);
} else {
@@ -9486,12 +8709,7 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
instr = HMul::New(zone(), context, left, right);
break;
case Token::MOD:
- instr = HMod::New(zone(),
- context,
- left,
- right,
- has_fixed_right_arg,
- fixed_right_arg_value);
+ instr = HMod::New(zone(), context, left, right, fixed_right_arg);
break;
case Token::DIV:
instr = HDiv::New(zone(), context, left, right);
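
HMod::New now takes the fixed right operand as a single Maybe<int> instead of a separate (has_fixed_right_arg, fixed_right_arg_value) pair. A compilable sketch of the idiom (simplified; V8's Maybe is assumed to have the same has-value/value shape):

    #include <cstdio>

    template <typename T>
    struct Maybe {
      Maybe() : has_value(false), value() {}
      explicit Maybe(T t) : has_value(true), value(t) {}
      bool has_value;
      T value;
    };

    void NewModSketch(Maybe<int> fixed_right_arg) {
      if (fixed_right_arg.has_value) {
        std::printf("specialize x %% %d\n", fixed_right_arg.value);
      } else {
        std::printf("generic modulus\n");
      }
    }

    int main() {
      NewModSketch(Maybe<int>(8));  // feedback saw a constant divisor
      NewModSketch(Maybe<int>());   // no usable feedback
    }
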
@@ -9502,7 +8720,8 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
- if (left_info.IsInteger32() && right_info.IsInteger32() &&
+ if (left_type->Is(Type::Signed32()) &&
+ right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = new(zone()) HRor(context, operand, shift_amount);
} else {
@@ -9544,7 +8763,7 @@ static bool IsClassOfTest(CompareOperation* expr) {
if (call == NULL) return false;
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
- if (!literal->handle()->IsString()) return false;
+ if (!literal->value()->IsString()) return false;
if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
return false;
}
@@ -9688,8 +8907,10 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
}
+// TODO(rossberg): this should die eventually.
Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
if (info.IsUninitialized()) return Representation::None();
+ // TODO(verwaest): Return Smi rather than Integer32.
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
if (info.IsDouble()) return Representation::Double();
@@ -9698,6 +8919,14 @@ Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
}
+Representation HOptimizedGraphBuilder::ToRepresentation(Handle<Type> type) {
+ if (type->Is(Type::None())) return Representation::None();
+ if (type->Is(Type::Signed32())) return Representation::Integer32();
+ if (type->Is(Type::Number())) return Representation::Double();
+ return Representation::Tagged();
+}
+
+
void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
HTypeof* typeof_expr,
Handle<String> check) {
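
The new ToRepresentation overload collapses the typer's lattice onto the four representations the backend cares about. A mock of the same decision table, with a plain enum standing in for Handle<Type> (assumed names):

    enum class LowerType { kNone, kSigned32, kNumber, kOther };
    enum class Rep { kNone, kInteger32, kDouble, kTagged };

    Rep ToRepresentationSketch(LowerType type) {
      if (type == LowerType::kNone) return Rep::kNone;           // no feedback
      if (type == LowerType::kSigned32) return Rep::kInteger32;  // fits int32
      if (type == LowerType::kNumber) return Rep::kDouble;       // any number
      return Rep::kTagged;                                       // everything else
    }

    int main() {
      return ToRepresentationSketch(LowerType::kNumber) == Rep::kDouble ? 0 : 1;
    }
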
@@ -9780,25 +9009,19 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
- Handle<String> rhs = Handle<String>::cast(literal->handle());
+ Handle<String> rhs = Handle<String>::cast(literal->value());
HClassOfTestAndBranch* instr =
new(zone()) HClassOfTestAndBranch(value, rhs);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo left_type = expr->left_type();
- TypeInfo right_type = expr->right_type();
- TypeInfo overall_type = expr->overall_type();
- Representation combined_rep = ToRepresentation(overall_type);
+ Handle<Type> left_type = expr->left()->lower_type();
+ Handle<Type> right_type = expr->right()->lower_type();
+ Handle<Type> combined_type = expr->combined_type();
+ Representation combined_rep = ToRepresentation(combined_type);
Representation left_rep = ToRepresentation(left_type);
Representation right_rep = ToRepresentation(right_type);
- // Check if this expression was ever executed according to type feedback.
- // Note that for the special typeof/null/undefined cases we get unknown here.
- if (overall_type.IsUninitialized()) {
- AddSoftDeoptimize();
- overall_type = left_type = right_type = TypeInfo::Unknown();
- }
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
@@ -9836,10 +9059,10 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
if (global_function &&
- info()->has_global_object() &&
- !info()->global_object()->IsAccessCheckNeeded()) {
+ current_info()->has_global_object() &&
+ !current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
@@ -9859,23 +9082,35 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- AddInstruction(new(zone()) HCheckFunction(right, target));
+ Add<HCheckFunction>(right, target);
HInstanceOfKnownGlobal* result =
new(zone()) HInstanceOfKnownGlobal(context, left, target);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
}
+
+ // Code below assumes that we don't fall through.
+ UNREACHABLE();
} else if (op == Token::IN) {
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (overall_type.IsNonPrimitive()) {
+ }
+
+ // Cases handled below depend on collected type feedback. They should
+ // soft deoptimize when there is no type feedback.
+ if (combined_type->Is(Type::None())) {
+ AddSoftDeoptimize();
+ combined_type = left_type = right_type = handle(Type::Any(), isolate());
+ }
+
+ if (combined_type->Is(Type::Receiver())) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
// Can we get away with map check and not instance type check?
- Handle<Map> map = expr->map();
- if (!map.is_null()) {
+ if (combined_type->IsClass()) {
+ Handle<Map> map = combined_type->AsClass();
AddCheckMapsWithTransitions(left, map);
AddCheckMapsWithTransitions(right, map);
HCompareObjectEqAndBranch* result =
@@ -9883,9 +9118,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
- BuildCheckNonSmi(left);
+ BuildCheckHeapObject(left);
AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
- BuildCheckNonSmi(right);
+ BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -9896,11 +9131,11 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
default:
return Bailout("Unsupported non-primitive compare");
}
- } else if (overall_type.IsInternalizedString() &&
+ } else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
- BuildCheckNonSmi(left);
+ BuildCheckHeapObject(left);
AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
- BuildCheckNonSmi(right);
+ BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -9915,6 +9150,10 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
+ // TODO(verwaest): Remove once ToRepresentation properly returns Smi when
+ // the IC measures Smi.
+ if (left_type->Is(Type::Smi())) left_rep = Representation::Smi();
+ if (right_type->Is(Type::Smi())) right_rep = Representation::Smi();
HCompareIDAndBranch* result =
new(zone()) HCompareIDAndBranch(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
@@ -9931,8 +9170,8 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
HIfContinuation continuation;
- CompareNilICStub::Types types;
if (expr->op() == Token::EQ_STRICT) {
IfBuilder if_nil(this);
if_nil.If<HCompareObjectEqAndBranch>(
@@ -9943,11 +9182,9 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
if_nil.CaptureContinuation(&continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
- types = CompareNilICStub::Types(expr->compare_nil_types());
- if (types.IsEmpty()) types = CompareNilICStub::Types::FullCompare();
- Handle<Map> map_handle = expr->map();
- BuildCompareNil(value, types, map_handle,
- expr->position(), &continuation);
+ Handle<Type> type = expr->combined_type()->Is(Type::None())
+ ? handle(Type::Any(), isolate_) : expr->combined_type();
+ BuildCompareNil(value, type, expr->position(), &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
@@ -9957,8 +9194,7 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// this-function is not a constant, except inside an inlined body.
if (function_state()->outer() != NULL) {
return new(zone()) HConstant(
- function_state()->compilation_info()->closure(),
- Representation::Tagged());
+ function_state()->compilation_info()->closure());
} else {
return new(zone()) HThisFunction;
}
@@ -9972,31 +9208,43 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
int data_size,
int pointer_size,
AllocationSiteMode mode) {
- Zone* zone = this->zone();
- int total_size = data_size + pointer_size;
-
NoObservableSideEffectsScope no_effects(this);
- HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
- // TODO(hpayer): add support for old data space
- if (isolate()->heap()->ShouldGloballyPretenure() &&
- data_size == 0) {
- flags = static_cast<HAllocate::Flags>(
- flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ HInstruction* target = NULL;
+ HInstruction* data_target = NULL;
+
+ HAllocate::Flags flags = HAllocate::DefaultFlags();
+
+ if (isolate()->heap()->ShouldGloballyPretenure()) {
+ if (data_size != 0) {
+ HAllocate::Flags data_flags =
+ static_cast<HAllocate::Flags>(HAllocate::DefaultFlags() |
+ HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE);
+ HValue* size_in_bytes = Add<HConstant>(data_size);
+ data_target = Add<HAllocate>(context, size_in_bytes,
+ HType::JSObject(), data_flags);
+ Handle<Map> free_space_map = isolate()->factory()->free_space_map();
+ AddStoreMapConstant(data_target, free_space_map);
+ HObjectAccess access =
+ HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
+ AddStore(data_target, access, size_in_bytes);
+ }
+ if (pointer_size != 0) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ HValue* size_in_bytes = Add<HConstant>(pointer_size);
+ target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
+ }
+ } else {
+ HValue* size_in_bytes = Add<HConstant>(data_size + pointer_size);
+ target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
}
- HValue* size_in_bytes =
- AddInstruction(new(zone) HConstant(total_size,
- Representation::Integer32()));
- HInstruction* result =
- AddInstruction(new(zone) HAllocate(context,
- size_in_bytes,
- HType::JSObject(),
- flags));
int offset = 0;
- BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, result,
- &offset, mode);
- return result;
+ int data_offset = 0;
+ BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, target,
+ &offset, data_target, &data_offset, mode);
+ return target;
}
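
BuildFastLiteral previously carved one new-space block of data_size + pointer_size bytes; it now plans separate allocations when pretenuring, putting raw data words (e.g. double elements) into old data space behind a FreeSpace filler (map plus size) so the heap stays walkable until the copy completes. A compilable sketch of the planning logic:

    #include <cstdio>

    void PlanLiteralAllocation(bool pretenure, int data_size,
                               int pointer_size) {
      if (!pretenure) {
        // One contiguous new-space block, as before.
        std::printf("new space: %d bytes\n", data_size + pointer_size);
        return;
      }
      if (data_size != 0)
        std::printf("old data space: %d bytes (FreeSpace filler)\n",
                    data_size);
      if (pointer_size != 0)
        std::printf("old pointer space: %d bytes\n", pointer_size);
    }

    int main() {
      PlanLiteralAllocation(false, 64, 128);
      PlanLiteralAllocation(true, 64, 128);
    }
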
@@ -10005,46 +9253,56 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
Handle<JSObject> original_boilerplate_object,
HInstruction* target,
int* offset,
+ HInstruction* data_target,
+ int* data_offset,
AllocationSiteMode mode) {
- Zone* zone = this->zone();
-
Handle<FixedArrayBase> elements(boilerplate_object->elements());
Handle<FixedArrayBase> original_elements(
original_boilerplate_object->elements());
ElementsKind kind = boilerplate_object->map()->elements_kind();
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
int object_offset = *offset;
int object_size = boilerplate_object->map()->instance_size();
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
- int elements_offset = *offset + object_size;
+ int elements_offset = 0;
- *offset += object_size + elements_size;
+ if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
+ elements_offset = *data_offset;
+ *data_offset += elements_size;
+ } else {
+ // Place elements right after this object.
+ elements_offset = *offset + object_size;
+ *offset += elements_size;
+ }
+ // Increase the offset so that subsequent objects end up right after this
+ // object (and its elements if they are allocated in the same space).
+ *offset += object_size;
// Copy object elements if non-COW.
HValue* object_elements = BuildEmitObjectHeader(boilerplate_object, target,
- object_offset, elements_offset, elements_size);
+ data_target, object_offset, elements_offset, elements_size);
if (object_elements != NULL) {
BuildEmitElements(elements, original_elements, kind, object_elements,
- target, offset);
+ target, offset, data_target, data_offset);
}
// Copy in-object properties.
- HValue* object_properties =
- AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
- BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
- object_properties, target, offset);
+ if (boilerplate_object->map()->NumberOfFields() != 0) {
+ HValue* object_properties =
+ Add<HInnerAllocatedObject>(target, object_offset);
+ BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
+ object_properties, target, offset, data_target, data_offset);
+ }
// Create allocation site info.
if (mode == TRACK_ALLOCATION_SITE &&
boilerplate_object->map()->CanTrackAllocationSite()) {
elements_offset += AllocationSiteInfo::kSize;
*offset += AllocationSiteInfo::kSize;
- HInstruction* original_boilerplate = AddInstruction(new(zone) HConstant(
- original_boilerplate_object, Representation::Tagged()));
+ HInstruction* original_boilerplate =
+ Add<HConstant>(original_boilerplate_object);
BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate);
}
}
@@ -10053,15 +9311,14 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* target,
+ HInstruction* data_target,
int object_offset,
int elements_offset,
int elements_size) {
ASSERT(boilerplate_object->properties()->length() == 0);
- Zone* zone = this->zone();
HValue* result = NULL;
- HValue* object_header =
- AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
+ HValue* object_header = Add<HInnerAllocatedObject>(target, object_offset);
Handle<Map> boilerplate_object_map(boilerplate_object->map());
AddStoreMapConstant(object_header, boilerplate_object_map);
@@ -10069,11 +9326,13 @@ HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
if (elements_size == 0) {
Handle<Object> elements_field =
Handle<Object>(boilerplate_object->elements(), isolate());
- elements = AddInstruction(new(zone) HConstant(
- elements_field, Representation::Tagged()));
+ elements = Add<HConstant>(elements_field);
} else {
- elements = AddInstruction(new(zone) HInnerAllocatedObject(
- target, elements_offset));
+ if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
+ elements = Add<HInnerAllocatedObject>(data_target, elements_offset);
+ } else {
+ elements = Add<HInnerAllocatedObject>(target, elements_offset);
+ }
result = elements;
}
AddStore(object_header, HObjectAccess::ForElementsPointer(), elements);
@@ -10081,8 +9340,7 @@ HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
- HInstruction* properties = AddInstruction(new(zone) HConstant(
- properties_field, Representation::None()));
+ HInstruction* properties = Add<HConstant>(properties_field);
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
AddStore(object_header, access, properties);
@@ -10091,8 +9349,7 @@ HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSArray>::cast(boilerplate_object);
Handle<Object> length_field =
Handle<Object>(boilerplate_array->length(), isolate());
- HInstruction* length = AddInstruction(new(zone) HConstant(
- length_field, Representation::None()));
+ HInstruction* length = Add<HConstant>(length_field);
ASSERT(boilerplate_array->length()->IsSmi());
Representation representation =
@@ -10111,8 +9368,9 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> original_boilerplate_object,
HValue* object_properties,
HInstruction* target,
- int* offset) {
- Zone* zone = this->zone();
+ int* offset,
+ HInstruction* data_target,
+ int* data_offset) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -10139,28 +9397,32 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
isolate()));
- HInstruction* value_instruction =
- AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
+ *offset);
AddStore(object_properties, access, value_instruction);
BuildEmitDeepCopy(value_object, original_value_object, target,
- offset, DONT_TRACK_ALLOCATION_SITE);
+ offset, data_target, data_offset, DONT_TRACK_ALLOCATION_SITE);
} else {
Representation representation = details.representation();
- HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
- value, Representation::Tagged()));
+ HInstruction* value_instruction = Add<HConstant>(value);
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
- HInstruction* double_box =
- AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ HInstruction* double_box;
+ if (data_target != NULL) {
+ double_box = Add<HInnerAllocatedObject>(data_target, *data_offset);
+ *data_offset += HeapNumber::kSize;
+ } else {
+ double_box = Add<HInnerAllocatedObject>(target, *offset);
+ *offset += HeapNumber::kSize;
+ }
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
value_instruction, Representation::Double());
value_instruction = double_box;
- *offset += HeapNumber::kSize;
}
AddStore(object_properties, access, value_instruction);
@@ -10168,9 +9430,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
}
int inobject_properties = boilerplate_object->map()->inobject_properties();
- HInstruction* value_instruction = AddInstruction(new(zone)
- HConstant(isolate()->factory()->one_pointer_filler_map(),
- Representation::Tagged()));
+ HInstruction* value_instruction =
+ Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
for (int i = copied_fields; i < inobject_properties; i++) {
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
@@ -10186,12 +9447,11 @@ void HOptimizedGraphBuilder::BuildEmitElements(
ElementsKind kind,
HValue* object_elements,
HInstruction* target,
- int* offset) {
- Zone* zone = this->zone();
-
+ int* offset,
+ HInstruction* data_target,
+ int* data_offset) {
int elements_length = elements->length();
- HValue* object_elements_length =
- AddInstruction(new(zone) HConstant(elements_length));
+ HValue* object_elements_length = Add<HConstant>(elements_length);
BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
@@ -10200,7 +9460,7 @@ void HOptimizedGraphBuilder::BuildEmitElements(
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
BuildEmitFixedArray(elements, original_elements, kind, object_elements,
- target, offset);
+ target, offset, data_target, data_offset);
} else {
UNREACHABLE();
}
@@ -10211,17 +9471,16 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements) {
- Zone* zone = this->zone();
- HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
- elements, Representation::Tagged()));
+ HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
for (int i = 0; i < elements_length; i++) {
- HValue* key_constant = AddInstruction(new(zone) HConstant(i));
+ HValue* key_constant = Add<HConstant>(i);
HInstruction* value_instruction =
- AddInstruction(new(zone) HLoadKeyed(
- boilerplate_elements, key_constant, NULL, kind, ALLOW_RETURN_HOLE));
- HInstruction* store = AddInstruction(new(zone) HStoreKeyed(
- object_elements, key_constant, value_instruction, kind));
+ Add<HLoadKeyed>(boilerplate_elements, key_constant,
+ static_cast<HValue*>(NULL), kind,
+ ALLOW_RETURN_HOLE);
+ HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
+ value_instruction, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
}
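
The static_cast<HValue*>(NULL) in the HLoadKeyed call sites above is not noise: with the templated Add<> helpers introduced later in this patch, a bare NULL literal would deduce the template parameter as int rather than the HValue* dependency the HLoadKeyed constructor expects. A minimal illustration of the pitfall:

    // Template argument deduction with the Add<> helpers:
    Add<HLoadKeyed>(elems, key, NULL, kind, ALLOW_RETURN_HOLE);
        // deduces P3 = int; no matching HLoadKeyed constructor
    Add<HLoadKeyed>(elems, key, static_cast<HValue*>(NULL), kind,
                    ALLOW_RETURN_HOLE);  // deduces P3 = HValue*, compiles
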
@@ -10233,34 +9492,32 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
ElementsKind kind,
HValue* object_elements,
HInstruction* target,
- int* offset) {
- Zone* zone = this->zone();
- HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
- elements, Representation::Tagged()));
+ int* offset,
+ HInstruction* data_target,
+ int* data_offset) {
+ HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
Handle<FixedArray> original_fast_elements =
Handle<FixedArray>::cast(original_elements);
for (int i = 0; i < elements_length; i++) {
Handle<Object> value(fast_elements->get(i), isolate());
- HValue* key_constant = AddInstruction(new(zone) HConstant(i));
+ HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
Handle<Object>(original_fast_elements->get(i), isolate()));
- HInstruction* value_instruction =
- AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
- AddInstruction(new(zone) HStoreKeyed(
- object_elements, key_constant, value_instruction, kind));
+ HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
+ *offset);
+ Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
BuildEmitDeepCopy(value_object, original_value_object, target,
- offset, DONT_TRACK_ALLOCATION_SITE);
+ offset, data_target, data_offset, DONT_TRACK_ALLOCATION_SITE);
} else {
HInstruction* value_instruction =
- AddInstruction(new(zone) HLoadKeyed(
- boilerplate_elements, key_constant, NULL, kind,
- ALLOW_RETURN_HOLE));
- AddInstruction(new(zone) HStoreKeyed(
- object_elements, key_constant, value_instruction, kind));
+ Add<HLoadKeyed>(boilerplate_elements, key_constant,
+ static_cast<HValue*>(NULL), kind,
+ ALLOW_RETURN_HOLE);
+ Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
}
}
}
@@ -10282,12 +9539,10 @@ void HOptimizedGraphBuilder::VisitDeclarations(
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(info()->language_mode());
- HInstruction* result = new(zone()) HDeclareGlobals(
- environment()->LookupContext(), array, flags);
- AddInstruction(result);
+ int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+ Add<HDeclareGlobals>(environment()->LookupContext(), array, flags);
globals_.Clear();
}
}
@@ -10317,9 +9572,8 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
if (hole_init) {
HValue* value = graph()->GetConstantHole();
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new(zone()) HStoreContextSlot(
+ HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
- AddInstruction(store);
if (store->HasObservableSideEffects()) {
AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
}
@@ -10338,8 +9592,8 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_.Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
+ Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
+ declaration->fun(), current_info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
@@ -10356,9 +9610,8 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new(zone()) HStoreContextSlot(
+ HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
- AddInstruction(store);
if (store->HasObservableSideEffects()) {
AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
}
@@ -10530,8 +9783,7 @@ void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
// function is blacklisted by AstNode::IsInlineable.
ASSERT(function_state()->outer() == NULL);
ASSERT(call->arguments()->length() == 0);
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
+ HInstruction* elements = Add<HArgumentsElements>(false);
HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10545,10 +9797,9 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* index = Pop();
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
- HInstruction* checked_index = AddBoundsCheck(index, length);
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* length = Add<HArgumentsLength>(elements);
+ HInstruction* checked_index = Add<HBoundsCheck>(index, length);
HAccessArgumentsAt* result =
new(zone()) HAccessArgumentsAt(elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
@@ -10575,7 +9826,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
HDateField* result = new(zone()) HDateField(date, index);
@@ -10712,8 +9963,7 @@ void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
// Fast support for Math.random().
void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>(context);
HRandom* result = new(zone()) HRandom(global_object);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10819,15 +10069,14 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
current_block()->Finish(typecheck);
set_current_block(if_jsfunction);
- HInstruction* invoke_result = AddInstruction(
- new(zone()) HInvokeFunction(context, function, arg_count));
+ HInstruction* invoke_result =
+ Add<HInvokeFunction>(context, function, arg_count);
Drop(arg_count);
Push(invoke_result);
if_jsfunction->Goto(join);
set_current_block(if_nonfunction);
- HInstruction* call_result = AddInstruction(
- new(zone()) HCallFunction(context, function, arg_count));
+ HInstruction* call_result = Add<HCallFunction>(context, function, arg_count);
Drop(arg_count);
Push(call_result);
if_nonfunction->Goto(join);
@@ -10940,6 +10189,13 @@ void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
+ CallRuntime* call) {
+ AddInstruction(new(zone()) HDebugBreak());
+ return ast_context()->ReturnValue(graph()->GetConstant0());
+}
+
+
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -11005,6 +10261,7 @@ HEnvironment::HEnvironment(HEnvironment* outer,
values_(arguments, zone),
frame_type_(frame_type),
parameter_count_(arguments),
+ specials_count_(0),
local_count_(0),
outer_(outer),
entry_(NULL),
@@ -11349,8 +10606,8 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
{
Tag HIR_tag(this, "HIR");
- HInstruction* instruction = current->first();
- while (instruction != NULL) {
+ for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
int bci = 0;
int uses = instruction->UseCount();
PrintIndent();
@@ -11359,7 +10616,6 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
trace_.Add(" ");
instruction->PrintTo(&trace_);
trace_.Add(" <|@\n");
- instruction = instruction->next();
}
}
@@ -11484,10 +10740,10 @@ void HStatistics::Print() {
}
for (int i = 0; i < names_.length(); ++i) {
- PrintF("%30s", names_[i]);
+ PrintF("%32s", names_[i]);
double ms = static_cast<double>(timing_[i]) / 1000;
double percent = static_cast<double>(timing_[i]) * 100 / sum;
- PrintF(" - %8.3f ms / %4.1f %% ", ms, percent);
+ PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
double size_percent = static_cast<double>(size) * 100 / total_size_;
@@ -11497,21 +10753,21 @@ void HStatistics::Print() {
PrintF("----------------------------------------"
"---------------------------------------\n");
int64_t total = create_graph_ + optimize_graph_ + generate_code_;
- PrintF("%30s - %8.3f ms / %4.1f %% \n",
+ PrintF("%32s %8.3f ms / %4.1f %% \n",
"Create graph",
static_cast<double>(create_graph_) / 1000,
static_cast<double>(create_graph_) * 100 / total);
- PrintF("%30s - %8.3f ms / %4.1f %% \n",
+ PrintF("%32s %8.3f ms / %4.1f %% \n",
"Optimize graph",
static_cast<double>(optimize_graph_) / 1000,
static_cast<double>(optimize_graph_) * 100 / total);
- PrintF("%30s - %8.3f ms / %4.1f %% \n",
+ PrintF("%32s %8.3f ms / %4.1f %% \n",
"Generate and install code",
static_cast<double>(generate_code_) / 1000,
static_cast<double>(generate_code_) * 100 / total);
PrintF("----------------------------------------"
"---------------------------------------\n");
- PrintF("%30s - %8.3f ms (%.1f times slower than full code gen)\n",
+ PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
"Total",
static_cast<double>(total) / 1000,
static_cast<double>(total) / full_code_gen_);
@@ -11523,99 +10779,34 @@ void HStatistics::Print() {
double normalized_size_in_kb = source_size_in_kb > 0
? total_size_ / 1024 / source_size_in_kb
: 0;
- PrintF("%30s - %8.3f ms %7.3f kB allocated\n",
+ PrintF("%32s %8.3f ms %7.3f kB allocated\n",
"Average per kB source",
normalized_time, normalized_size_in_kb);
}
void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
- if (name == HPhase::kFullCodeGen) {
- full_code_gen_ += ticks;
- } else {
- total_size_ += size;
- for (int i = 0; i < names_.length(); ++i) {
- if (strcmp(names_[i], name) == 0) {
- timing_[i] += ticks;
- sizes_[i] += size;
- return;
- }
+ total_size_ += size;
+ for (int i = 0; i < names_.length(); ++i) {
+ if (strcmp(names_[i], name) == 0) {
+ timing_[i] += ticks;
+ sizes_[i] += size;
+ return;
}
- names_.Add(name);
- timing_.Add(ticks);
- sizes_.Add(size);
- }
-}
-
-
-const char* const HPhase::kFullCodeGen = "Full code generator";
-
-
-HPhase::HPhase(const char* name, Isolate* isolate) {
- Init(isolate, name, NULL, NULL, NULL);
-}
-
-
-HPhase::HPhase(const char* name, HGraph* graph) {
- Init(graph->isolate(), name, graph, NULL, NULL);
-}
-
-
-HPhase::HPhase(const char* name, LChunk* chunk) {
- Init(chunk->isolate(), name, NULL, chunk, NULL);
-}
-
-
-HPhase::HPhase(const char* name, LAllocator* allocator) {
- Init(allocator->isolate(), name, NULL, NULL, allocator);
-}
-
-
-void HPhase::Init(Isolate* isolate,
- const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator) {
- isolate_ = isolate;
- name_ = name;
- graph_ = graph;
- chunk_ = chunk;
- allocator_ = allocator;
- if (allocator != NULL && chunk_ == NULL) {
- chunk_ = allocator->chunk();
- }
- if (FLAG_hydrogen_stats) {
- start_ticks_ = OS::Ticks();
- start_allocation_size_ = Zone::allocation_size_;
}
+ names_.Add(name);
+ timing_.Add(ticks);
+ sizes_.Add(size);
}
HPhase::~HPhase() {
- if (FLAG_hydrogen_stats) {
- int64_t ticks = OS::Ticks() - start_ticks_;
- unsigned size = Zone::allocation_size_ - start_allocation_size_;
- isolate_->GetHStatistics()->SaveTiming(name_, ticks, size);
- }
-
- // Produce trace output if flag is set so that the first letter of the
- // phase name matches the command line parameter FLAG_trace_phase.
- if (FLAG_trace_hydrogen &&
- OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
- if (graph_ != NULL) {
- isolate_->GetHTracer()->TraceHydrogen(name_, graph_);
- }
- if (chunk_ != NULL) {
- isolate_->GetHTracer()->TraceLithium(name_, chunk_);
- }
- if (allocator_ != NULL) {
- isolate_->GetHTracer()->TraceLiveRanges(name_, allocator_);
- }
+ if (ShouldProduceTraceOutput()) {
+ isolate()->GetHTracer()->TraceHydrogen(name(), graph_);
}
#ifdef DEBUG
- if (graph_ != NULL) graph_->Verify(false); // No full verify.
- if (allocator_ != NULL) allocator_->Verify();
+ graph_->Verify(false); // No full verify.
#endif
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index ad89e505a3..f80aca1e3c 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -46,6 +46,7 @@ class FunctionState;
class HEnvironment;
class HGraph;
class HLoopInformation;
+class HOsrBuilder;
class HTracer;
class LAllocator;
class LChunk;
@@ -66,6 +67,7 @@ class HBasicBlock: public ZoneObject {
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
+ HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
@@ -229,6 +231,19 @@ class HPredecessorIterator BASE_EMBEDDED {
};
+class HInstructionIterator BASE_EMBEDDED {
+ public:
+ explicit HInstructionIterator(HBasicBlock* block) : instr_(block->first()) { }
+
+ bool Done() { return instr_ == NULL; }
+ HInstruction* Current() { return instr_; }
+ void Advance() { instr_ = instr_->next(); }
+
+ private:
+ HInstruction* instr_;
+};
+
+
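
HInstructionIterator packages the first()/Done()/Advance() walk that call sites previously spelled out by hand (compare the HTracer::Trace hunk earlier in this diff). A minimal usage sketch, assuming only the interface shown above; the loop body is illustrative:

    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
      HInstruction* instruction = it.Current();
      // per-instruction work, e.g. printing or rewriting
    }
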
class HLoopInformation: public ZoneObject {
public:
HLoopInformation(HBasicBlock* loop_header, Zone* zone)
@@ -260,6 +275,7 @@ class HLoopInformation: public ZoneObject {
HStackCheck* stack_check_;
};
+
class BoundsCheckTable;
class HGraph: public ZoneObject {
public:
@@ -281,8 +297,6 @@ class HGraph: public ZoneObject {
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
- void ComputeSafeUint32Operations();
- void GlobalValueNumbering();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
void Canonicalize();
@@ -345,24 +359,16 @@ class HGraph: public ZoneObject {
void Verify(bool do_full_verify) const;
#endif
- bool has_osr_loop_entry() {
- return osr_loop_entry_.is_set();
+ bool has_osr() {
+ return osr_ != NULL;
}
- HBasicBlock* osr_loop_entry() {
- return osr_loop_entry_.get();
+ void set_osr(HOsrBuilder* osr) {
+ osr_ = osr;
}
- void set_osr_loop_entry(HBasicBlock* entry) {
- osr_loop_entry_.set(entry);
- }
-
- ZoneList<HUnknownOSRValue*>* osr_values() {
- return osr_values_.get();
- }
-
- void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
- osr_values_.set(values);
+ HOsrBuilder* osr() {
+ return osr_;
}
int update_type_change_checksum(int delta) {
@@ -402,6 +408,12 @@ class HGraph: public ZoneObject {
}
void MarkDependsOnEmptyArrayProtoElements() {
+ // Add map dependency if not already added.
+ if (depends_on_empty_array_proto_elements_) return;
+ isolate()->initial_object_prototype()->map()->AddDependentCompilationInfo(
+ DependentCode::kElementsCantBeAddedGroup, info());
+ isolate()->initial_array_prototype()->map()->AddDependentCompilationInfo(
+ DependentCode::kElementsCantBeAddedGroup, info());
depends_on_empty_array_proto_elements_ = true;
}
@@ -409,7 +421,18 @@ class HGraph: public ZoneObject {
return depends_on_empty_array_proto_elements_;
}
+ bool has_uint32_instructions() {
+ ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+ return uint32_instructions_ != NULL;
+ }
+
+ ZoneList<HInstruction*>* uint32_instructions() {
+ ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+ return uint32_instructions_;
+ }
+
void RecordUint32Instruction(HInstruction* instr) {
+ ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
if (uint32_instructions_ == NULL) {
uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
}
@@ -420,6 +443,12 @@ class HGraph: public ZoneObject {
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
+ template<class Phase>
+ void Run() {
+ Phase phase(this);
+ phase.Run();
+ }
+
void MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist);
void MarkLiveInstructions();
void RemoveDeadInstructions();
@@ -459,8 +488,7 @@ class HGraph: public ZoneObject {
SetOncePointer<HConstant> constant_invalid_context_;
SetOncePointer<HArgumentsObject> arguments_object_;
- SetOncePointer<HBasicBlock> osr_loop_entry_;
- SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+ HOsrBuilder* osr_;
CompilationInfo* info_;
Zone* zone_;
@@ -693,25 +721,6 @@ class HEnvironment: public ZoneObject {
};
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
- void AddToWorklist(HValue* current);
-
- private:
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
class HOptimizedGraphBuilder;
enum ArgumentsAllowedFlag {
@@ -874,6 +883,11 @@ class FunctionState {
HEnterInlined* entry() { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }
+ HArgumentsObject* arguments_object() { return arguments_object_; }
+ void set_arguments_object(HArgumentsObject* arguments_object) {
+ arguments_object_ = arguments_object;
+ }
+
HArgumentsElements* arguments_elements() { return arguments_elements_; }
void set_arguments_elements(HArgumentsElements* arguments_elements) {
arguments_elements_ = arguments_elements;
@@ -907,6 +921,7 @@ class FunctionState {
// entry.
HEnterInlined* entry_;
+ HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
FunctionState* outer_;
@@ -968,6 +983,7 @@ class HGraphBuilder {
Zone* zone() const { return info_->zone(); }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
+ CompilationInfo* top_info() { return info_; }
HGraph* CreateGraph();
@@ -977,9 +993,57 @@ class HGraphBuilder {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
+
+ template<class I>
+ I* Add() { return static_cast<I*>(AddInstruction(new(zone()) I())); }
+
+ template<class I, class P1>
+ I* Add(P1 p1) {
+ return static_cast<I*>(AddInstruction(new(zone()) I(p1)));
+ }
+
+ template<class I, class P1, class P2>
+ I* Add(P1 p1, P2 p2) {
+ return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2)));
+ }
+
+ template<class I, class P1, class P2, class P3>
+ I* Add(P1 p1, P2 p2, P3 p3) {
+ return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2, p3)));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
+ return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2, p3, p4)));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+ return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2, p3, p4, p5)));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+ return static_cast<I*>(AddInstruction(
+ new(zone()) I(p1, p2, p3, p4, p5, p6)));
+ }
+
+ template<class I, class P1, class P2, class P3,
+ class P4, class P5, class P6, class P7>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+ return static_cast<I*>(AddInstruction(
+ new(zone()) I(p1, p2, p3, p4, p5, p6, p7)));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7, class P8>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
+ return static_cast<I*>(AddInstruction(
+ new(zone()) I(p1, p2, p3, p4, p5, p6, p7, p8)));
+ }
+
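
Each Add<I> overload forwards its arguments to I's constructor, allocates the instruction in the builder's zone, and registers it via AddInstruction; the ladder of arities stands in for variadic templates, which this pre-C++11 codebase cannot use. The effect at call sites, using HConstant as in the hunks above:

    // Before: explicit zone allocation at every call site.
    HInstruction* c = AddInstruction(new(zone()) HConstant(value));
    // After: the helper hides the allocation and registration boilerplate.
    HInstruction* c = Add<HConstant>(value);
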
void AddSimulate(BailoutId id,
RemovableSimulate removable = FIXED_SIMULATE);
- HBoundsCheck* AddBoundsCheck(HValue* index, HValue* length);
HReturn* AddReturn(HValue* value);
@@ -997,7 +1061,7 @@ class HGraphBuilder {
HBasicBlock* CreateBasicBlock(HEnvironment* env);
HBasicBlock* CreateLoopHeaderBlock();
- HValue* BuildCheckNonSmi(HValue* object);
+ HValue* BuildCheckHeapObject(HValue* object);
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
@@ -1063,6 +1127,8 @@ class HGraphBuilder {
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
+ HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
+
class IfBuilder {
public:
explicit IfBuilder(HGraphBuilder* builder,
@@ -1077,8 +1143,7 @@ class HGraphBuilder {
HInstruction* IfCompare(
HValue* left,
HValue* right,
- Token::Value token,
- Representation input_representation = Representation::Integer32());
+ Token::Value token);
HInstruction* IfCompareMap(HValue* left, Handle<Map> map);
@@ -1110,10 +1175,9 @@ class HGraphBuilder {
HInstruction* OrIfCompare(
HValue* p1,
HValue* p2,
- Token::Value token,
- Representation input_representation = Representation::Integer32()) {
+ Token::Value token) {
Or();
- return IfCompare(p1, p2, token, input_representation);
+ return IfCompare(p1, p2, token);
}
HInstruction* OrIfCompareMap(HValue* left, Handle<Map> map) {
@@ -1136,10 +1200,9 @@ class HGraphBuilder {
HInstruction* AndIfCompare(
HValue* p1,
HValue* p2,
- Token::Value token,
- Representation input_representation = Representation::Integer32()) {
+ Token::Value token) {
And();
- return IfCompare(p1, p2, token, input_representation);
+ return IfCompare(p1, p2, token);
}
HInstruction* AndIfCompareMap(HValue* left, Handle<Map> map) {
@@ -1259,7 +1322,8 @@ class HGraphBuilder {
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
- bool disable_allocation_sites);
+ HValue* constructor_function,
+ AllocationSiteOverrideMode override_mode);
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
@@ -1275,9 +1339,6 @@ class HGraphBuilder {
int elements_size() const {
return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
}
- HInstruction* AddInstruction(HInstruction* instr) {
- return builder_->AddInstruction(instr);
- }
HGraphBuilder* builder() { return builder_; }
HGraph* graph() { return builder_->graph(); }
int initial_capacity() {
@@ -1350,8 +1411,7 @@ class HGraphBuilder {
void BuildCompareNil(
HValue* value,
- CompareNilICStub::Types types,
- Handle<Map> map,
+ Handle<Type> type,
int position,
HIfContinuation* continuation);
@@ -1370,7 +1430,6 @@ class HGraphBuilder {
int no_side_effects_scope_count_;
};
-
class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
@@ -1488,7 +1547,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void set_ast_context(AstContext* context) { ast_context_ = context; }
// Accessors forwarded to the function state.
- CompilationInfo* info() const {
+ CompilationInfo* current_info() const {
return function_state()->compilation_info();
}
AstContext* call_context() const {
@@ -1528,8 +1587,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void VisitArithmeticExpression(BinaryOperation* expr);
bool PreProcessOsrEntry(IterationStatement* statement);
- // True iff. we are compiling for OSR and the statement is the entry.
- bool HasOsrEntryAt(IterationStatement* statement);
void VisitLoopBody(IterationStatement* stmt,
HBasicBlock* loop_entry,
BreakAndContinueInfo* break_info);
@@ -1613,8 +1670,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
- void AddPhi(HPhi* phi);
-
void PushAndAdd(HInstruction* instr);
// Remove the arguments from the bailout environment and emit instructions
@@ -1622,6 +1677,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
static Representation ToRepresentation(TypeInfo info);
+ static Representation ToRepresentation(Handle<Type> type);
void SetUpScope(Scope* scope);
virtual void VisitStatements(ZoneList<Statement*>* statements);
@@ -1658,7 +1714,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
bool TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
+ BailoutId id,
+ BailoutId assignment_id,
HValue* implicit_return_value);
bool TryInlineApply(Handle<JSFunction> function,
Call* expr,
@@ -1687,11 +1744,19 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* object,
SmallMapList* types,
Handle<String> name);
- bool HandlePolymorphicArrayLengthLoad(Property* expr,
+ HInstruction* TryLoadPolymorphicAsMonomorphic(Property* expr,
+ HValue* object,
+ SmallMapList* types,
+ Handle<String> name);
+ void HandlePolymorphicStoreNamedField(BailoutId id,
+ int position,
+ BailoutId assignment_id,
HValue* object,
+ HValue* value,
SmallMapList* types,
Handle<String> name);
- void HandlePolymorphicStoreNamedField(Assignment* expr,
+ bool TryStorePolymorphicAsMonomorphic(int position,
+ BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
@@ -1767,6 +1832,14 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);
+ void BuildStoreNamed(Expression* expression,
+ BailoutId id,
+ int position,
+ BailoutId assignment_id,
+ Property* prop,
+ HValue* object,
+ HValue* value);
+
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
@@ -1801,13 +1874,16 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void BuildEmitDeepCopy(Handle<JSObject> boilerplat_object,
Handle<JSObject> object,
- HInstruction* result,
+ HInstruction* target,
int* offset,
+ HInstruction* data_target,
+ int* data_offset,
AllocationSiteMode mode);
MUST_USE_RESULT HValue* BuildEmitObjectHeader(
Handle<JSObject> boilerplat_object,
HInstruction* target,
+ HInstruction* data_target,
int object_offset,
int elements_offset,
int elements_size);
@@ -1816,14 +1892,18 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Handle<JSObject> original_boilerplate_object,
HValue* object_properties,
HInstruction* target,
- int* offset);
+ int* offset,
+ HInstruction* data_target,
+ int* data_offset);
void BuildEmitElements(Handle<FixedArrayBase> elements,
Handle<FixedArrayBase> original_elements,
ElementsKind kind,
HValue* object_elements,
HInstruction* target,
- int* offset);
+ int* offset,
+ HInstruction* data_target,
+ int* data_offset);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
@@ -1834,7 +1914,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
ElementsKind kind,
HValue* object_elements,
HInstruction* target,
- int* offset);
+ int* offset,
+ HInstruction* data_target,
+ int* data_offset);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -1866,9 +1948,12 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool inline_bailout_;
+ HOsrBuilder* osr_;
+
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
friend class KeyedLoadFastElementStub;
+ friend class HOsrBuilder;
DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
};
@@ -1894,6 +1979,10 @@ class HStatistics: public Malloced {
void Print();
void SaveTiming(const char* name, int64_t ticks, unsigned size);
+ void IncrementFullCodeGen(int64_t full_code_gen) {
+ full_code_gen_ += full_code_gen;
+ }
+
void IncrementSubtotals(int64_t create_graph,
int64_t optimize_graph,
int64_t generate_code) {
@@ -1915,30 +2004,20 @@ class HStatistics: public Malloced {
};
-class HPhase BASE_EMBEDDED {
+class HPhase : public CompilationPhase {
public:
- static const char* const kFullCodeGen;
-
- HPhase(const char* name, Isolate* isolate);
- HPhase(const char* name, HGraph* graph);
- HPhase(const char* name, LChunk* chunk);
- HPhase(const char* name, LAllocator* allocator);
+ HPhase(const char* name, HGraph* graph)
+ : CompilationPhase(name, graph->info()),
+ graph_(graph) { }
~HPhase();
- private:
- void Init(Isolate* isolate,
- const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator);
+ protected:
+ HGraph* graph() const { return graph_; }
- Isolate* isolate_;
- const char* name_;
+ private:
HGraph* graph_;
- LChunk* chunk_;
- LAllocator* allocator_;
- int64_t start_ticks_;
- unsigned start_allocation_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(HPhase);
};
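
HPhase is now a thin RAII wrapper: construction (via the CompilationPhase base) starts the bookkeeping, and the destructor shown earlier in this diff emits the hydrogen trace and, in DEBUG builds, verifies the graph. A usage sketch under those assumptions:

    {
      HPhase phase("H_Some phase", graph);
      // ... mutate the graph while the phase is live ...
    }   // ~HPhase: trace output (if enabled) and graph verification
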
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 2a0c920936..b6ef242a2c 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -132,6 +132,7 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
@@ -162,24 +163,22 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
@@ -259,8 +258,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
@@ -289,8 +288,8 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index c0b2abd512..7bb643a16d 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -36,7 +36,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "disassembler.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 5d11452890..353f265ab7 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -410,10 +410,10 @@ class Operand BASE_EMBEDDED {
RelocInfo::EXTERNAL_REFERENCE);
}
- static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
+ static Operand ForCell(Handle<Cell> cell) {
AllowDeferredHandleDereference embedding_raw_address;
return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::GLOBAL_PROPERTY_CELL);
+ RelocInfo::CELL);
}
// Returns true if this Operand is a wrapper for the specified register.
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index bf4ee949ed..8aa6e4a603 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "deoptimizer.h"
@@ -447,6 +447,8 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Clear the context before we push it when entering the internal frame.
__ Set(esi, Immediate(0));
@@ -1015,427 +1017,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ mov(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(initial_capacity)));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- __ mov(scratch3, factory->the_hole_value());
- for (int i = 0; i < initial_capacity; i++) {
- __ mov(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ mov(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- factory->the_hole_value());
- __ bind(&entry);
- __ dec(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- ASSERT(scratch.is(edi)); // rep stos destination
- ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
- ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
-
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- array_size,
- REGISTER_VALUE_IS_SMI,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- Factory* factory = masm->isolate()->factory();
- __ mov(elements_array, factory->empty_fixed_array());
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- if (fill_with_hole) {
- __ SmiUntag(array_size);
- __ lea(edi, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, factory->the_hole_value());
- __ cld();
- // Do not use rep stos when filling less than kRepStosThreshold
- // words.
- const int kRepStosThreshold = 16;
- Label loop, entry, done;
- __ cmp(ecx, kRepStosThreshold);
- __ j(below, &loop); // Note: ecx > 0.
- __ rep_stos();
- __ jmp(&done);
- __ bind(&loop);
- __ stos();
- __ bind(&entry);
- __ cmp(edi, elements_array_end);
- __ j(below, &loop);
- __ bind(&done);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// edi: constructor (built-in Array function)
-// eax: argc
-// esp[0]: return address
-// esp[4]: last argument
-// This function is used for both construct and normal calls of Array. Whether
-// it is a construct call or not is indicated by the construct_call parameter.
-// The only difference between handling a construct call and a normal call is
-// that for a construct call the constructor function in edi needs to be
-// preserved for entering the generic code. In both cases argc in eax needs to
-// be preserved.
-void ArrayNativeCode(MacroAssembler* masm,
- bool construct_call,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array, finish, cant_transition_map, not_double;
-
- // Push the constructor and argc. No need to tag argc as a smi, as there will
- // be no garbage collection with this on the stack.
- int push_count = 0;
- if (construct_call) {
- push_count++;
- __ push(edi);
- }
- push_count++;
- __ push(eax);
-
- // Check for array construction with zero arguments.
- __ test(eax, eax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edi,
- &prepare_generic_code_call);
- __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(eax, 1);
- __ j(not_equal, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, ecx);
- __ j(not_zero, &not_empty_array);
-
- // The single argument passed is zero, so we jump to the code above used to
- // handle the case of no arguments passed. To adapt the stack for that we move
- // the return address and the pushed constructor (if pushed) one stack slot up
- // thereby removing the passed argument. Argc is also on the stack - at the
- // bottom - and it needs to be changed from 1 to 0 to have the call into the
- // runtime system work in case a GC is required.
- for (int i = push_count; i > 0; i--) {
- __ mov(eax, Operand(esp, i * kPointerSize));
- __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
- }
- __ Drop(2); // Drop two stack slots.
- __ push(Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
- __ j(not_zero, &prepare_generic_code_call);
-
- // Handle construction of an empty array of a certain size. Get the size from
- // the stack and bail out if size is too large to actually allocate an elements
- // array.
- __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
- __ j(greater_equal, &prepare_generic_code_call);
-
- // edx: array_size (smi)
- // edi: constructor
- // esp[0]: argc (cannot be 0 here)
- // esp[4]: constructor (only if construct_call)
- // esp[8]: return address
- // esp[C]: argument
- AllocateJSArray(masm,
- edi,
- ecx,
- ebx,
- eax,
- edx,
- edi,
- true,
- &prepare_generic_code_call);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(eax); // Convert argc to a smi.
- // eax: array_size (smi)
- // edi: constructor
- // esp[0] : argc
- // esp[4]: constructor (only if construct_call)
- // esp[8] : return address
- // esp[C] : last argument
- AllocateJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edx,
- edi,
- false,
- &prepare_generic_code_call);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ push(ebx);
- __ mov(ebx, Operand(esp, kPointerSize));
- // ebx: argc
- // edx: elements_array_end (untagged)
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
-
- // Location of the last argument
- int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArray is false, so the FixedArray is returned in ecx).
- __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- Label has_non_smi_element;
-
- // ebx: argc
- // edx: location of the first array element
- // edi: location of the last argument
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- Label loop, entry;
- __ mov(ecx, ebx);
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(eax, &has_non_smi_element);
- }
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // ebx: argc
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- __ bind(&finish);
- __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
- __ pop(eax);
- __ pop(ebx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size,
- last_arg_offset - kPointerSize));
- __ jmp(ecx);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- // Throw away the array that's only been partially constructed.
- __ pop(eax);
- __ UndoAllocationInNewSpace(eax);
- __ jmp(&prepare_generic_code_call);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, Operand(esp, 0));
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- edi,
- eax,
- &cant_transition_map);
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
- __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Prepare to re-enter the loop
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-
- // Restore argc and constructor before running the generic code.
- __ bind(&prepare_generic_code_call);
- __ pop(eax);
- if (construct_call) {
- __ pop(edi);
- }
- __ jmp(call_generic_code);
-}
-
-
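
The removed fast path above leans on ia32 smi tagging throughout: argc is tagged with SmiTag, and a single test against kIntptrSignBit | kSmiTagMask rejects values that are negative or not smis. A minimal stand-alone sketch of that representation, assuming the 1-bit tag layout used here (kSmiTag == 0, kSmiTagSize == 1) rather than V8's actual classes:

    #include <cassert>
    #include <cstdint>

    static const int kSmiTag = 0;
    static const int kSmiTagSize = 1;

    static int32_t SmiTag(int32_t value) {
      return value << kSmiTagSize;  // low bit 0 marks a smi; heap pointers have bit 0 set
    }

    static int32_t SmiUntag(int32_t tagged) {
      assert((tagged & 1) == kSmiTag);
      return tagged >> kSmiTagSize;  // arithmetic shift restores the value
    }

    int main() {
      int32_t argc = 3;
      int32_t smi = SmiTag(argc);  // what "__ SmiTag(eax)" does to a register
      assert(SmiUntag(smi) == argc);
      assert((smi & 1) == 0);      // the kSmiTagMask half of the bail-out test
    }
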
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1459,20 +1040,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- if (FLAG_optimize_constructed_arrays) {
- // tail call a stub
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic internal array code in case the specialized code
- // cannot handle the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
- }
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -1498,58 +1068,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- if (FLAG_optimize_constructed_arrays) {
- // tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic internal array code in case the specialized code
- // cannot handle the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- ebx : type info cell
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
- }
-
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ mov(ebx, Immediate(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index ad1c65db2b..29a4be2140 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "bootstrapper.h"
#include "code-stubs.h"
@@ -1527,7 +1527,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
UNREACHABLE();
}
- if (op_ == Token::MOD && has_fixed_right_arg_) {
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
// It is guaranteed that the value will fit into a Smi, because if it
// didn't, we wouldn't be here, see BinaryOp_Patch.
__ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
@@ -1669,7 +1669,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
FloatingPointHelper::CheckSSE2OperandIsInt32(
masm, &not_int32, xmm1, edi, ecx, xmm2);
if (op_ == Token::MOD) {
- if (has_fixed_right_arg_) {
+ if (encoded_right_arg_.has_value) {
__ cmp(edi, Immediate(fixed_right_arg_value()));
__ j(not_equal, &right_arg_changed);
}
@@ -4678,56 +4678,17 @@ void InterruptStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // ebx : cache cell for call target
- // edi : the function to call
- Isolate* isolate = masm->isolate();
- Label initialize, done;
-
- // Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &done, Label::kNear);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done, Label::kNear);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
- ASSERT(FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -4739,12 +4700,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Special handling of the Array() function, which caches not only the
// monomorphic Array function but the initial ElementsKind with special
// sentinels
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(ecx, &miss);
- __ cmp(ecx, Immediate(terminal_kind_sentinel));
- __ j(above, &miss);
+ if (FLAG_debug_code) {
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ cmp(ecx, Immediate(terminal_kind_sentinel));
+ __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
+ }
+
// Load the global or builtins object from the current context
__ LoadGlobalContext(ecx);
// Make sure the function is the Array() function
@@ -4762,7 +4726,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kNear);
@@ -4781,12 +4745,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> initial_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
GetInitialFastElementsKind());
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(initial_kind_sentinel));
__ jmp(&done);
__ bind(&not_array_function);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+ __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
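
GenerateRecordCallTarget implements a three-state inline cache per call site: uninitialized, monomorphic (the cell holds the JSFunction), and megamorphic, plus smi sentinels for the Array() function that this hunk demotes to a debug-only assert. A hedged C++ sketch of the core state machine; the sentinel objects are illustrative, where V8 uses dedicated heap values:

    static int uninitialized_marker, megamorphic_marker;
    static void* const kUninitializedSentinel = &uninitialized_marker;
    static void* const kMegamorphicSentinel = &megamorphic_marker;

    struct Cell { void* value = kUninitializedSentinel; };

    void RecordCallTarget(Cell* cell, void* function) {
      void* state = cell->value;
      if (state == function || state == kMegamorphicSentinel) return;  // cache hit or already generic
      if (state == kUninitializedSentinel) {
        cell->value = function;              // first observed target: go monomorphic
      } else {
        cell->value = kMegamorphicSentinel;  // a different target: go megamorphic
      }
    }

No write barrier is needed for either store in the real stub because cells are rescanned and the megamorphic sentinel is an immortal immovable object, as the comments above note.
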
@@ -4824,11 +4788,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Fast-case: Just invoke the function.
@@ -4857,7 +4817,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
}
// Check for function proxy.
@@ -4901,15 +4861,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
+ Register jmp_reg = ecx;
__ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -4955,9 +4911,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
- if (FLAG_optimize_constructed_arrays) {
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
- }
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -5050,11 +5004,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label okay;
__ cmp(eax, masm->isolate()->factory()->the_hole_value());
__ j(not_equal, &okay, Label::kNear);
- // TODO(wingo): Currently SuspendJSGeneratorObject returns the hole. Change
- // to return another sentinel like a harmony symbol.
- __ cmp(ebx, Immediate(ExternalReference(
- Runtime::kSuspendJSGeneratorObject, masm->isolate())));
- __ j(equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
}
@@ -5131,6 +5080,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// NOTE: Invocations of builtins may return failure objects instead
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
@@ -5204,6 +5155,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Set up frame.
__ push(ebp);
__ mov(ebp, esp);
@@ -6907,9 +6860,13 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &miss, Label::kNear);
+ __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
+ __ cmpb(tmp1, kInternalizedTag | kStringTag);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
+ __ cmpb(tmp2, kInternalizedTag | kStringTag);
+ __ j(not_equal, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
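
The rewritten check no longer and-s the two instance types together: each operand is now independently required to be both a string and internalized, since the internalized bit alone no longer implies string-ness. A sketch of the per-operand test, with assumed bit values (the real constants live in objects.h):

    const unsigned kIsNotStringMask    = 0x80;  // set => not a string
    const unsigned kStringTag          = 0x00;
    const unsigned kIsInternalizedMask = 0x10;
    const unsigned kInternalizedTag    = 0x10;  // non-zero, per the STATIC_ASSERT

    bool IsInternalizedString(unsigned instance_type) {
      // One and+cmp pair per operand, as in the rewritten stub above.
      return (instance_type & (kIsNotStringMask | kIsInternalizedMask)) ==
             (kInternalizedTag | kStringTag);
    }
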
@@ -6954,19 +6911,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- Label succeed1;
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed1);
- __ cmpb(tmp1, static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &miss);
- __ bind(&succeed1);
-
- Label succeed2;
- __ test(tmp2, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed2);
- __ cmpb(tmp2, static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &miss);
- __ bind(&succeed2);
+ __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
@@ -7031,7 +6977,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Check that both strings are internalized. If they are, we're done
// because we already know they are not identical. But in the case of
- // non-equality compare, we still need to determine the order.
+ // non-equality compare, we still need to determine the order. We
+ // also know they are both strings.
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag != 0);
@@ -7190,12 +7137,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Check if the entry name is not a unique name.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsInternalizedMask);
- __ j(not_zero, &good);
- __ cmpb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, miss);
+ __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ miss);
__ bind(&good);
}
@@ -7328,15 +7271,9 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// key we are looking for.
// Check if the entry name is not a unique name.
- Label cont;
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
- kIsInternalizedMask);
- __ j(not_zero, &cont);
- __ cmpb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &maybe_in_dictionary);
- __ bind(&cont);
+ __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
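
JumpIfNotUniqueName folds the previously open-coded pair of checks into one macro-assembler helper: a unique name is an internalized string or a Symbol, and both can be compared by identity during dictionary probes. Roughly, with the same assumed bit values as the sketch above and an illustrative SYMBOL_TYPE:

    const unsigned SYMBOL_TYPE = 0x07;  // illustrative instance-type value

    bool IsUniqueName(unsigned instance_type) {
      const unsigned kIsNotStringMask = 0x80, kIsInternalizedMask = 0x10;
      bool internalized_string =
          (instance_type & (kIsNotStringMask | kIsInternalizedMask)) ==
          kIsInternalizedMask;  // the string tag itself is 0
      return internalized_string || instance_type == SYMBOL_TYPE;
    }
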
@@ -7661,11 +7598,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : element value to store
- // -- ebx : array literal
- // -- edi : map of array literal
// -- ecx : element index as smi
- // -- edx : array literal index in function
// -- esp[0] : return address
+ // -- esp[4] : array literal index in function
+ // -- esp[8] : array literal
+ // clobbers ebx, edx, edi
// -----------------------------------
Label element_done;
@@ -7675,6 +7612,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements_from_double;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+
__ CheckFastElements(edi, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
@@ -7756,7 +7698,11 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ // It's always safe to call the entry hook stub, as the hook itself
+ // is not allowed to call back to V8.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -7764,9 +7710,11 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Ecx is the only volatile register we must save.
- const int kNumSavedRegisters = 1;
+ // Save volatile registers.
+ const int kNumSavedRegisters = 3;
+ __ push(eax);
__ push(ecx);
+ __ push(edx);
// Calculate and push the original stack pointer.
__ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
@@ -7779,12 +7727,16 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ push(eax);
// Call the entry hook.
- int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
- __ call(Operand(hook_location, RelocInfo::NONE32));
+ ASSERT(masm->isolate()->function_entry_hook() != NULL);
+ __ call(FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ RelocInfo::RUNTIME_ENTRY);
__ add(esp, Immediate(2 * kPointerSize));
// Restore the saved volatile registers.
+ __ pop(edx);
__ pop(ecx);
+ __ pop(eax);
+
__ ret(0);
}
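
The stub now reads the hook from the isolate and saves all three ia32 caller-saved registers (eax, ecx, edx) around the call, since the hook is an arbitrary C function. A hedged sketch of such a hook; the two-uintptr_t signature mirrors v8.h's FunctionEntryHook typedef of this era, though the registration API is not shown here:

    #include <cstdint>
    #include <cstdio>

    void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
      // Must not call back into V8. The stub preserves eax/ecx/edx around
      // this call, so ordinary compiled C code is safe here.
      std::fprintf(stderr, "enter code %#zx (return slot %#zx)\n",
                   static_cast<size_t>(function),
                   static_cast<size_t>(return_addr_location));
    }
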
@@ -7842,6 +7794,10 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &normal_sequence);
+ // The type cell may have gone megamorphic; don't overwrite it if so.
+ __ mov(ecx, FieldOperand(ebx, kPointerSize));
+ __ JumpIfNotSmi(ecx, &normal_sequence);
+
// Save the resulting elements kind in type info
__ SmiTag(edx);
__ mov(FieldOperand(ebx, kPointerSize), edx);
@@ -7871,10 +7827,10 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind, false);
+ T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, true);
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7930,63 +7886,49 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, "Unexpected initial map for Array function");
- // We should either have undefined in ebx or a valid jsglobalpropertycell
+ // We should either have undefined in ebx or a valid cell
Label okay_here;
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
+ __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
__ Assert(equal, "Expected property cell in register ebx");
__ bind(&okay_here);
}
- if (FLAG_optimize_constructed_arrays) {
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &no_info);
- __ mov(edx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
- __ JumpIfNotSmi(edx, &no_info);
- __ SmiUntag(edx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ mov(edx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
+ Label no_info, switch_ready;
+ // Get the elements kind and case on that.
+ __ cmp(ebx, Immediate(undefined_sentinel));
+ __ j(equal, &no_info);
+ __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
+ __ JumpIfNotSmi(edx, &no_info);
+ __ SmiUntag(edx);
+ __ jmp(&switch_ready);
+ __ bind(&no_info);
+ __ mov(edx, Immediate(GetInitialFastElementsKind()));
+ __ bind(&switch_ready);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else {
+ UNREACHABLE();
}
}
@@ -8049,46 +7991,33 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Assert(equal, "Unexpected initial map for Array function");
}
- if (FLAG_optimize_constructed_arrays) {
- // Figure out the right elements kind
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Figure out the right elements kind
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ shr(ecx, Map::kElementsKindShift);
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ecx, Map::kElementsKindMask);
+ __ shr(ecx, Map::kElementsKindShift);
- if (FLAG_debug_code) {
- Label done;
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(equal, &done);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ Assert(equal,
- "Invalid ElementsKind for InternalArray or InternalPackedArray");
- __ bind(&done);
- }
-
- Label fast_elements_case;
+ if (FLAG_debug_code) {
+ Label done;
__ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ __ bind(&done);
+ }
- __ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
+ Label fast_elements_case;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
}
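
With the flag gone, the stub always reads the constructor's initial map and decodes the elements kind from the map's second bit-field byte with one and plus one shr. The same decode in stand-alone form, with an assumed mask/shift pair mirroring the assembly:

    #include <cassert>

    const unsigned kElementsKindMask  = 0xF8;  // assumed: top five bits of bit field 2
    const int      kElementsKindShift = 3;

    unsigned ElementsKindFromBitField2(unsigned bit_field2) {
      return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
    }

    int main() {
      unsigned bf2 = (2u << kElementsKindShift) | 0x5;  // kind 2 plus unrelated flags
      assert(ElementsKindFromBitField2(bf2) == 2u);
    }
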
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index d562238893..da32c504fc 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "heap.h"
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 2d83cab2d5..77ff169b52 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -33,7 +33,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "cpu.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index a4c6bcc679..db1d5a612a 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "debug.h"
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 16befa910c..6af2445f45 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "deoptimizer.h"
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 9eb0d292c7..14e580069f 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -31,7 +31,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "disasm.h"
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index ea19e9f6ba..5570811768 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -27,22 +27,17 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "assembler.h"
#include "assembler-ia32.h"
#include "assembler-ia32-inl.h"
-#include "frames-inl.h"
+#include "frames.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index c77faaad80..cf3132d33f 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "code-stubs.h"
#include "codegen.h"
@@ -118,7 +118,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -315,7 +315,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(ebx, Immediate(profiling_counter_));
- __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ sub(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(delta)));
}
@@ -327,7 +327,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
reset_value = Smi::kMaxValue;
}
__ mov(ebx, Immediate(profiling_counter_));
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
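
kBackEdgeDistanceUnit is renamed kCodeSizeMultiplier below, but the bookkeeping is unchanged: each back edge decrements a cell-held counter by a weight proportional to the code distance, and a non-positive result takes the interrupt path before the counter is reset. A compact sketch with assumed constant values:

    #include <algorithm>

    const int kInterruptBudget    = 0x1800;  // assumed; FLAG_interrupt_budget
    const int kMaxBackEdgeWeight  = 127;
    const int kCodeSizeMultiplier = 100;     // assumed divisor

    struct ProfilingCounter {
      int value = kInterruptBudget;
      // Returns true when the back edge should take the interrupt/OSR path.
      bool BackEdge(int code_distance) {
        int weight = std::min(kMaxBackEdgeWeight,
                              std::max(1, code_distance / kCodeSizeMultiplier));
        value -= weight;              // EmitProfilingCounterDecrement(weight)
        if (value > 0) return false;  // __ j(positive, &ok)
        value = kInterruptBudget;     // EmitProfilingCounterReset()
        return true;
      }
    };
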
@@ -342,7 +342,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -384,7 +384,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -1105,14 +1105,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
@@ -1641,10 +1639,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(ecx, Immediate(key->handle()));
+ __ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1784,13 +1782,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(eax);
+ __ push(eax); // array literal.
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1799,7 +1795,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
__ mov(FieldOperand(ebx, offset), result_register());
@@ -1810,10 +1806,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
__ mov(ecx, Immediate(Smi::FromInt(i)));
- __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
@@ -1822,6 +1815,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
+ __ add(esp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -1950,22 +1944,38 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::INITIAL:
- case Yield::SUSPEND: {
- VisitForStackValue(expr->generator_object());
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+ __ cmp(esp, ebx);
+ __ j(equal, &post_runtime);
+ __ push(eax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
-
- Label resume;
- __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &resume);
- if (expr->yield_kind() == Yield::SUSPEND) {
- EmitReturnIteratorResult(false);
- } else {
- __ pop(result_register());
- EmitReturnSequence();
- }
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1977,7 +1987,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- EmitReturnIteratorResult(true);
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
break;
}
@@ -1988,74 +2001,69 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
// Initial send value is undefined.
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&l_next);
- // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // iter
- __ push(edx); // iter
- __ push(eax); // exception
__ mov(ecx, isolate()->factory()->throw_string()); // "throw"
- Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(throw_ic); // iter.throw in eax
+ __ push(ecx); // "throw"
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // exception
__ jmp(&l_call);
- // try { received = yield result.value }
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
__ bind(&l_try);
- __ pop(eax); // result.value
+ __ pop(eax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(eax); // result.value
- __ push(Operand(esp, (0 + 1) * kPointerSize + handler_size)); // g
+ __ push(eax); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ mov(eax, Operand(esp, generator_object_depth));
+ __ push(eax); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(l_continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CompareRoot(eax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &l_resume);
- EmitReturnIteratorResult(false);
+ __ pop(eax); // result
+ EmitReturnSequence();
__ bind(&l_resume); // received in eax
__ PopTryHandler();
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // iter
- __ push(edx); // iter
- __ push(eax); // received
__ mov(ecx, isolate()->factory()->next_string()); // "next"
- Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(next_ic); // iter.next in eax
+ __ push(ecx);
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // received
- // result = f.call(receiver, arg);
+ // result = receiver[f](arg);
__ bind(&l_call);
- Label l_call_runtime;
- __ JumpIfSmi(eax, &l_call_runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &l_call_runtime);
- __ mov(edi, eax);
- ParameterCount count(1);
- __ InvokeFunction(edi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&l_loop);
- __ bind(&l_call_runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCall, 3);
+ __ Drop(1); // The key is still on the stack; drop it.
- // val = result.value; if (!result.done) goto l_try;
+ // if (!result.done) goto l_try;
__ bind(&l_loop);
- // result.value
__ push(eax); // save result
__ mov(edx, eax); // result
- __ mov(ecx, isolate()->factory()->value_string()); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in eax
- __ pop(ebx); // result
- __ push(eax); // result.value
- __ mov(edx, ebx); // result
__ mov(ecx, isolate()->factory()->done_string()); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in eax
@@ -2065,7 +2073,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ j(zero, &l_try);
// result.value
- __ pop(eax); // result.value
+ __ pop(edx); // result
+ __ mov(ecx, isolate()->factory()->value_string()); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in eax
context()->DropAndPlug(2, eax); // drop iter and g
break;
}
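
Both the plain yield and the delegating yield now suspend inline: they record the resume offset and the current context in the generator object (with a write barrier for the context) and return to the caller; resuming jumps back to the recorded continuation. A structural sketch with an illustrative layout, not V8's JSGeneratorObject fields:

    #include <cstdint>

    struct GeneratorObject {
      int32_t continuation;  // resume offset, stored as a smi in the real code
      void*   context;       // the saved context register (esi)
    };

    void Suspend(GeneratorObject* g, int32_t resume_offset, void* current_context) {
      g->continuation = resume_offset;  // Smi::FromInt(continuation.pos())
      g->context = current_context;     // plus a RecordWriteField in the real code
      // Control then returns to the caller; resume re-enters at `continuation`.
    }
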
@@ -2105,7 +2116,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ mov(ecx, isolate()->factory()->the_hole_value());
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ sub(edx, Immediate(1));
+ __ sub(edx, Immediate(Smi::FromInt(1)));
__ j(carry, &push_frame);
__ push(ecx);
__ jmp(&push_argument_holes);
@@ -2169,13 +2180,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
-void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ mov(ebx, map);
@@ -2194,34 +2212,14 @@ void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
// root set.
__ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
ecx, edx, kDontSaveFPRegs);
-
- if (done) {
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- }
-
- EmitReturnSequence();
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&allocated);
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- ASSERT(!key->handle()->IsSmi());
- __ mov(ecx, Immediate(key->handle()));
+ ASSERT(!key->value()->IsSmi());
+ __ mov(ecx, Immediate(key->value()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -2375,7 +2373,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ mov(edx, eax);
__ pop(eax); // Restore value.
- __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2502,7 +2500,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -2637,8 +2635,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ mov(ebx, cell);
@@ -2768,7 +2765,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2822,8 +2819,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ mov(ebx, cell);
@@ -3387,7 +3383,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3816,7 +3812,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
@@ -4528,7 +4524,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 95c7c029d6..eb6ccd90e1 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "ic-inl.h"
@@ -317,7 +317,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
__ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string);
- // Is the string internalized?
+ // Is the string internalized? We already know it's a string, so a single
+ // bit test is enough.
STATIC_ASSERT(kInternalizedTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
__ j(zero, not_unique);
@@ -1482,8 +1483,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 7d685bff32..defae1c162 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-codegen-ia32.h"
#include "ic.h"
@@ -74,7 +74,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
@@ -109,20 +109,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
- for (int i = 0 ; i < transition_maps_.length(); i++) {
- transition_maps_.at(i)->AddDependentCode(
- DependentCode::kTransitionGroup, code);
- }
- if (graph()->depends_on_empty_array_proto_elements()) {
- isolate()->initial_object_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- isolate()->initial_array_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- }
+ info()->CommitDependencies(code);
}
@@ -627,27 +614,15 @@ Operand LCodeGen::HighOperand(LOperand* op) {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
+ WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
@@ -680,60 +655,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
UNREACHABLE();
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
+
+ // TODO(mstarzinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed from the deoptimizer. Currently
+ // this is only used for the arguments object.
+ if (value == NULL) {
+ int arguments_count = environment->values()->length() - translation_size;
+ translation->BeginArgumentsObject(arguments_count);
+ for (int i = 0; i < arguments_count; ++i) {
+ LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
+ value,
+ environment->HasTaggedValueAt(translation_size + i),
+ environment->HasUint32ValueAt(translation_size + i));
}
+ continue;
}
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
+ environment->HasUint32ValueAt(i));
}
}
@@ -741,17 +685,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ bool is_uint32) {
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -877,8 +812,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -886,7 +819,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -941,7 +874,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ popfd();
}
- if (FLAG_trap_on_deopt) {
+ if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label done;
if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
@@ -1229,7 +1162,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ // Record the address of the first unknown OSR value as the place to enter.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
@@ -1263,12 +1197,12 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(left_reg, divisor - 1);
__ bind(&done);
- } else if (hmod->has_fixed_right_arg()) {
+ } else if (hmod->fixed_right_arg().has_value) {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(ToRegister(instr->result())));
Register right_reg = ToRegister(instr->right());
- int32_t divisor = hmod->fixed_right_arg_value();
+ int32_t divisor = hmod->fixed_right_arg().value;
ASSERT(IsPowerOf2(divisor));
// Check if our assumption of a fixed right operand still holds.
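
The fixed-right-arg path keys off the renamed encoded_right_arg_/fixed_right_arg() accessors, but the optimization itself is the classic one: for a non-negative left operand and a power-of-two divisor d, x % d == x & (d - 1), with a runtime check (and deopt) if the recorded divisor stops matching. As a stand-alone check:

    #include <cassert>

    int ModPowerOfTwo(int x, int divisor) {
      assert(divisor > 0 && (divisor & (divisor - 1)) == 0);  // IsPowerOf2(divisor)
      assert(x >= 0);  // negative operands take a slower path in the real stub
      return x & (divisor - 1);
    }

    int main() {
      assert(ModPowerOfTwo(13, 4) == 1);
      assert(ModPowerOfTwo(32, 8) == 0);
    }
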
@@ -1839,14 +1773,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoFixedArrayBaseLength(
- LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->value());
@@ -1876,8 +1802,11 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
ASSERT(input.is(result));
Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi return the object.
+ __ JumpIfSmi(input, &done, Label::kNear);
+ }
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, map);
@@ -2127,10 +2056,12 @@ int LCodeGen::GetNextEmittedBlock() const {
}
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
+
int next_block = GetNextEmittedBlock();
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
@@ -2146,22 +2077,19 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
@@ -2169,43 +2097,52 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (type.IsBoolean()) {
ASSERT(!info()->IsStub());
__ cmp(reg, factory()->true_value());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
} else if (type.IsSmi()) {
ASSERT(!info()->IsStub());
__ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(instr, not_equal);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ CpuFeatureScope scope(masm(), SSE2);
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ EmitBranch(instr, not_equal);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ EmitBranch(instr, not_equal);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
- __ j(equal, true_label);
+ __ j(equal, instr->TrueLabel(chunk_));
// false -> false.
__ cmp(reg, factory()->false_value());
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ j(equal, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
@@ -2222,14 +2159,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(not_zero, false_label);
+ __ j(not_zero, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
+ __ j(above_equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2238,15 +2175,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
+ __ j(not_zero, instr->TrueLabel(chunk_));
+ __ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
if (expected.Contains(ToBooleanStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
- __ j(equal, true_label);
+ __ j(equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -2264,13 +2201,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
__ FCmp();
}
- __ j(zero, false_label);
- __ jmp(true_label);
+ __ j(zero, instr->FalseLabel(chunk_));
+ __ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
}
}
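
For orientation, a minimal self-contained sketch (not V8 source; the enum layout and bit width are assumptions) of the ToBooleanStub::Types bookkeeping that drives the branch emission above: only the observed cases are emitted inline, and the trailing deopt is dropped once the set is generic.

#include <cstdint>

// Stand-in for ToBooleanStub::Types: a bitset of input types observed so far.
enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING,
            SYMBOL, HEAP_NUMBER, NUMBER_OF_TYPES };

struct Types {
  uint16_t bits;
  bool Contains(Type t) const { return (bits >> t) & 1; }
  bool IsEmpty() const { return bits == 0; }
  // Generic: every case is handled inline, so no deopt is required.
  bool IsGeneric() const { return bits == (1 << NUMBER_OF_TYPES) - 1; }
  static Types Generic() { return Types{(1 << NUMBER_OF_TYPES) - 1}; }
};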
@@ -2278,7 +2218,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block) {
if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
@@ -2319,17 +2259,14 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2337,7 +2274,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ __ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
int32_t const_value = ToInteger32(LConstantOperand::cast(right));
@@ -2359,15 +2296,13 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
__ cmp(ToRegister(left), ToOperand(right));
}
}
- EmitBranch(true_block, false_block, cc);
+ EmitBranch(instr, cc);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->right()->IsConstantOperand()) {
Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
@@ -2376,17 +2311,15 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Operand right = ToOperand(instr->right());
__ cmp(left, right);
}
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
__ cmp(left, instr->hydrogen()->right());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2417,21 +2350,20 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ Condition true_cond = EmitIsObject(
+ reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
@@ -2443,24 +2375,22 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond = EmitIsString(reg, temp, false_label);
+ Condition true_cond = EmitIsString(
+ reg, temp, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ test(input, Immediate(kSmiTagMask));
- EmitBranch(true_block, false_block, zero);
+ EmitBranch(instr, zero);
}
@@ -2468,15 +2398,14 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
}
@@ -2502,8 +2431,6 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2511,7 +2438,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Condition condition = ComputeCompareCondition(op);
__ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -2539,15 +2466,12 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
@@ -2566,12 +2490,9 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2647,25 +2568,17 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
__ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2719,9 +2632,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = ToRegister(instr->temp());
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
+ Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
+ __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
@@ -2903,7 +2815,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -2926,19 +2838,19 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
+ __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
- __ mov(Operand::Cell(cell_handle), value);
+ __ mov(Operand::ForCell(cell_handle), value);
// Cells are always rescanned, so no write barrier here.
}
@@ -2993,9 +2905,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register temp = ToRegister(instr->temp());
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context,
@@ -4238,12 +4150,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- if (FLAG_optimize_constructed_arrays) {
- // No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ mov(ebx, Immediate(undefined_value));
- }
+ // No cell in ebx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ mov(ebx, Immediate(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4254,22 +4163,42 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(FLAG_optimize_constructed_arrays);
__ Set(eax, Immediate(instr->arity()));
__ mov(ebx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
- bool disable_allocation_sites =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need the holey variant of this elements kind;
+      // look at the first argument (the requested length) to decide.
+ __ mov(ecx, Operand(esp, 0));
+ __ test(ecx, ecx);
+ __ j(zero, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
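
A hedged sketch of the elements-kind selection the single-argument path above performs (the enum is a reduced stand-in; GetHoleyElementsKind mirrors the V8 helper): new Array(n) with n > 0 starts out with holes, so a non-zero length argument selects the holey stub.

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
                    FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

ElementsKind GetHoleyElementsKind(ElementsKind packed) {
  return packed == FAST_SMI_ELEMENTS ? FAST_HOLEY_SMI_ELEMENTS
                                     : FAST_HOLEY_ELEMENTS;
}

// Zero length keeps the packed kind; any other length needs holes.
ElementsKind SelectKind(ElementsKind packed_kind, int first_arg) {
  return first_arg == 0 ? packed_kind : GetHoleyElementsKind(packed_kind);
}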
@@ -4331,9 +4260,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
if (!transition.is_null()) {
- if (transition->CanBeDeprecated()) {
- transition_maps_.Add(transition, info()->zone());
- }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
__ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
@@ -4353,9 +4279,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register write_register = object;
if (!access.IsInobject()) {
@@ -4592,9 +4518,9 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
ASSERT(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, operand);
__ RecordWrite(elements,
@@ -4937,7 +4863,8 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg, xmm1);
+ __ LoadUint32(xmm0, reg,
+ ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
// int manually.
@@ -5735,9 +5662,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
}
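
This hunk is one instance of a pattern applied throughout the patch; a sketch of the rule with a hypothetical helper (the enumerators are the real V8 names):

enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

// When hydrogen already proves the value is a heap object, the inline
// smi test (and the deopt it guards) can be omitted entirely.
SmiCheck CheckNeededFor(bool value_is_heap_object) {
  return value_is_heap_object ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
}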
@@ -5790,9 +5719,8 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Handle<JSFunction> target = instr->hydrogen()->target();
if (instr->hydrogen()->target_in_new_space()) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ cmp(reg, Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ __ cmp(reg, Operand::ForCell(cell));
} else {
Operand operand = ToOperand(instr->value());
__ cmp(operand, target);
@@ -6009,11 +5937,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), instr);
@@ -6022,6 +5946,95 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
+ TAG_OBJECT);
+
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(scratch, constructor);
+ __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+ instance_size >> kPointerSizeLog2);
+ __ Assert(equal, "Unexpected instance size");
+ __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+ initial_map->pre_allocated_property_fields());
+ __ Assert(equal, "Unexpected pre-allocated property fields count");
+ __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+ initial_map->unused_property_fields());
+ __ Assert(equal, "Unexpected unused property fields count");
+ __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+ initial_map->inobject_properties());
+ __ Assert(equal, "Unexpected in-object property fields count");
+ }
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ mov(FieldOperand(result, JSObject::kMapOffset), map);
+ __ mov(scratch, factory()->empty_fixed_array());
+ __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ if (initial_map->inobject_properties() != 0) {
+ __ mov(scratch, factory()->undefined_value());
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ mov(FieldOperand(result, property_offset), scratch);
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ push(Immediate(Smi::FromInt(instance_size)));
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -6185,15 +6198,12 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ input, instr->type_literal());
if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -6274,11 +6284,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -6424,15 +6432,15 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ // Normally we record the first unknown OSR value as the entrypoint to the OSR
+ // code, but if there were none, record the entrypoint here.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 647dd0e4c0..d05da8a084 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -58,8 +58,6 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -85,7 +83,6 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
- // TODO(svenpanne) Use this consistently.
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -159,6 +156,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -170,10 +168,7 @@ class LCodeGen BASE_EMBEDDED {
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
void EnsureRelocSpaceForDeoptimization();
@@ -287,10 +282,7 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
+ bool is_uint32);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -325,7 +317,8 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
Register temp,
@@ -364,7 +357,8 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
@@ -409,8 +403,6 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 3da8f320d0..86bfe2fbf2 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 325ed2c7fd..8231c4e8b7 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "lithium-allocator-inl.h"
#include "ia32/lithium-ia32.h"
@@ -43,31 +43,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
@@ -376,8 +351,7 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
+ ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -481,7 +455,7 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
// Reserve the first spill slot for the state of dynamic alignment.
@@ -990,7 +964,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -1001,13 +975,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
+ bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
+ needs_arguments_object_materialization = true;
op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
@@ -1019,6 +995,21 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ if (needs_arguments_object_materialization) {
+ HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+ ? graph()->GetArgumentsObject()
+ : hydrogen_env->entry()->arguments_object();
+ ASSERT(arguments->IsLinked());
+ for (int i = 1; i < arguments->arguments_count(); ++i) {
+ HValue* value = arguments->arguments_values()->at(i);
+ ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+ LOperand* op = UseAny(value);
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -1043,20 +1034,28 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new(zone()) LGoto(successor->block_id());
}
- // Untagged integers or doubles, smis and booleans don't require a
- // deoptimization environment nor a temp register.
+ ToBooleanStub::Types expected = instr->expected_input_types();
+
+ // Tagged values that are not known smis or booleans require a
+ // deoptimization environment. If the instruction is generic no
+ // environment is needed since all cases are handled.
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
return new(zone()) LBranch(UseRegister(value), NULL);
}
- ToBooleanStub::Types expected = instr->expected_input_types();
+ bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+
+ // The Generic stub does not have a deopt, so we need no environment.
+ if (expected.IsGeneric()) {
+ return new(zone()) LBranch(UseRegister(value), temp);
+ }
+
// We need a temporary register when we have to access the map *or* we have
// no type info yet, in which case we handle all cases (including the ones
// involving maps).
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
}
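
A compact restatement of the decision just made (illustrative only; the booleans stand in for the ToBooleanStub::Types queries used above):

struct BranchPlan { bool needs_temp; bool needs_environment; };

// A temp register is needed to reach the map, or when there is no type
// feedback at all; an environment only while the stub can still deopt.
BranchPlan PlanTaggedBranch(bool needs_map, bool is_empty, bool is_generic) {
  BranchPlan plan;
  plan.needs_temp = needs_map || is_empty;
  plan.needs_environment = !is_generic;
  return plan;
}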
@@ -1350,7 +1349,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- ASSERT(FLAG_optimize_constructed_arrays);
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
@@ -1453,19 +1451,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
@@ -1539,7 +1524,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->has_fixed_right_arg()) {
+ } else if (instr->fixed_right_arg().has_value) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegisterAtStart(right),
NULL);
@@ -1831,13 +1816,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -2020,7 +1998,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
+ LOperand* temp = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
+ : NULL;
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -2052,7 +2032,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
@@ -2544,6 +2524,15 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* temp = TempRegister();
+ LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
@@ -2730,8 +2719,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
undefined,
instr->inlining_kind(),
instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+  // Only replay the arguments-object binding if it is still in the graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index e43672cdd9..e48e881eb5 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -44,6 +44,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -93,7 +94,6 @@ class LCodeGen;
V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
- V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -482,17 +482,44 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
virtual bool IsControl() const { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
@@ -867,6 +894,7 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -899,6 +927,7 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1206,7 +1235,7 @@ class LBranch: public LControlInstruction<1, 1> {
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
+class LCmpMapAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1217,29 +1246,7 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
@@ -2048,13 +2055,15 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2548,6 +2557,22 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LAllocateObject(LOperand* context, LOperand* temp) {
+ inputs_[0] = context;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2656,26 +2681,10 @@ class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
- LOsrEntry();
+ LOsrEntry() {}
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2813,7 +2822,6 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 38b02a52c4..a9a0268aef 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -27,10 +27,11 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "runtime.h"
#include "serialize.h"
@@ -1973,6 +1974,8 @@ void MacroAssembler::PrepareCallApiFunction(int argc, bool returns_handle) {
void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset) {
@@ -1998,8 +2001,26 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
+ cmpb(Operand(eax, 0), 0);
+ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ mov(thunk_last_arg, Immediate(function_address));
+ // Call the api function.
+ call(thunk_address, RelocInfo::RUNTIME_ENTRY);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
// Call the api function.
call(function_address, RelocInfo::RUNTIME_ENTRY);
+ bind(&end_profiler_check);
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
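
The control flow added above, restated as a sketch (the typedefs and function name are hypothetical; the flag is the profiler's one-byte is_profiling address):

typedef void (*ApiCallback)();
typedef void (*ApiThunk)(ApiCallback real_callback);

// When the flag is set, route the call through the thunk, which receives
// the real callback as its last argument and logs around the invocation;
// otherwise call the API function directly.
void CallMaybeThroughThunk(ApiCallback callback, ApiThunk thunk,
                           const bool* is_profiling_flag) {
  if (*is_profiling_flag) {
    thunk(callback);
  } else {
    callback();
  }
}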
@@ -2495,9 +2516,8 @@ void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
AllowDeferredHandleDereference embedding_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ mov(result, Operand::ForCell(cell));
} else {
mov(result, object);
}
@@ -2507,9 +2527,8 @@ void MacroAssembler::LoadHeapObject(Register result,
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- cmp(reg, Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ cmp(reg, Operand::ForCell(cell));
} else {
cmp(reg, object);
}
@@ -2519,9 +2538,8 @@ void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- push(Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ push(Operand::ForCell(cell));
} else {
Push(object);
}
@@ -2790,6 +2808,17 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
}
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+ cmp(operand, Immediate(kInternalizedTag));
+ j(less, not_unique_name, distance);
+ cmp(operand, Immediate(SYMBOL_TYPE));
+ j(greater, not_unique_name, distance);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
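
A sketch of the predicate JumpIfNotUniqueName encodes (the contiguity of the range is exactly what the STATIC_ASSERT above pins down; the concrete tag values are left as parameters):

// Internalized strings and symbols occupy one contiguous instance-type
// range, so "unique name" reduces to the two compares emitted above.
bool IsUniqueName(int instance_type, int internalized_tag, int symbol_type) {
  return instance_type >= internalized_tag && instance_type <= symbol_type;
}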
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 8380507ec0..5cb8286bae 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -784,6 +784,8 @@ class MacroAssembler: public Assembler {
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
void CallApiFunctionAndReturn(Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset_from_ebp);
@@ -882,6 +884,15 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* on_not_flat_ascii_strings);
+  // Checks whether the given register or operand contains the instance
+  // type of a unique name.
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar) {
+ JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+ }
+
+ void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 9a166d7d48..f478e574f5 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
+#include "cpu-profiler.h"
#include "unicode.h"
#include "log.h"
#include "regexp-stack.h"
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 3906623a58..28e043d641 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "ic-inl.h"
#include "codegen.h"
@@ -503,7 +503,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
STATIC_ASSERT(kFastApiCallArguments == 6);
__ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
- const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
+
+  // The API function gets a reference to the v8::Arguments. If the CPU
+  // profiler is enabled, a wrapper function is called instead, and the
+  // address of the real callback must be passed to it as an additional
+  // parameter, so always allocate space for it.
+ const int kApiArgc = 1 + 1;
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -517,20 +522,26 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace, returns_handle);
// v8::Arguments::implicit_args_.
- __ mov(ApiParameterOperand(1, returns_handle), eax);
+ __ mov(ApiParameterOperand(2, returns_handle), eax);
__ add(eax, Immediate(argc * kPointerSize));
// v8::Arguments::values_.
- __ mov(ApiParameterOperand(2, returns_handle), eax);
+ __ mov(ApiParameterOperand(3, returns_handle), eax);
// v8::Arguments::length_.
- __ Set(ApiParameterOperand(3, returns_handle), Immediate(argc));
+ __ Set(ApiParameterOperand(4, returns_handle), Immediate(argc));
// v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(4, returns_handle), Immediate(0));
+ __ Set(ApiParameterOperand(5, returns_handle), Immediate(0));
// v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(1, returns_handle));
+ __ lea(eax, ApiParameterOperand(2, returns_handle));
__ mov(ApiParameterOperand(0, returns_handle), eax);
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeInvocationCallback)
+ : FUNCTION_ADDR(&InvokeFunctionCallback);
+
__ CallApiFunctionAndReturn(function_address,
+ thunk_address,
+ ApiParameterOperand(1, returns_handle),
argc + kFastApiCallArguments + 1,
returns_handle,
kFastApiCallArguments + 1);
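
For reference, the assumed outgoing-argument layout after the shift above; slot 1 is the space newly reserved for the profiler thunk's callback address (enumerator names are illustrative, indices match the ApiParameterOperand uses):

enum ApiSlot {
  kCallbackArgSlot  = 0,  // v8::InvocationCallback's argument
  kThunkLastArgSlot = 1,  // real callback address, filled when profiling
  kImplicitArgsSlot = 2,  // v8::Arguments::implicit_args_
  kValuesSlot       = 3,  // v8::Arguments::values_
  kLengthSlot       = 4,  // v8::Arguments::length_
  kIsConstructSlot  = 5   // v8::Arguments::is_construct_call_
};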
@@ -753,16 +764,16 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<Name> name,
Register scratch,
Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
+ Handle<PropertyCell> cell =
GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
if (Serializer::enabled()) {
__ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
Immediate(the_hole));
} else {
- __ cmp(Operand::Cell(cell), Immediate(the_hole));
+ __ cmp(Operand::ForCell(cell), Immediate(the_hole));
}
__ j(not_equal, miss);
}
@@ -839,8 +850,14 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register storage_reg = name_reg;
- if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_restore_name);
+ if (details.type() == CONSTANT_FUNCTION) {
+ Handle<HeapObject> constant(
+ HeapObject::cast(descriptors->GetValue(descriptor)));
+ __ LoadHeapObject(scratch1, constant);
+ __ cmp(value_reg, scratch1);
+ __ j(not_equal, miss_restore_name);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
@@ -884,7 +901,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (object->map()->unused_property_fields() == 0) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch1); // Return address.
@@ -913,6 +931,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
+ if (details.type() == CONSTANT_FUNCTION) {
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
@@ -1406,7 +1430,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// array for v8::Arguments::values_, handler for name and pointer
// to the values (it considered as smi in GC).
const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
- const int kApiArgc = 2;
+  // Allocate space for the optional callback address parameter in case
+  // the CPU profiler is active.
+ const int kApiArgc = 2 + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
bool returns_handle =
@@ -1422,7 +1448,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// garbage collection but instead return the allocation failure
// object.
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeAccessorGetter)
+ : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+
__ CallApiFunctionAndReturn(getter_address,
+ thunk_address,
+ ApiParameterOperand(2, returns_handle),
kStackSpace,
returns_handle,
6);
@@ -1565,15 +1597,15 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
if (Serializer::enabled()) {
__ mov(edi, Immediate(cell));
- __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+ __ mov(edi, FieldOperand(edi, Cell::kValueOffset));
} else {
- __ mov(edi, Operand::Cell(cell));
+ __ mov(edi, Operand::ForCell(cell));
}
// Check that the cell contains the same function.
@@ -1664,12 +1696,59 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+  // Check that the function is still the Array function.
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ // Get the receiver from the stack.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+ Handle<Cell> kind_feedback_cell =
+ isolate()->factory()->NewCell(kind);
+ __ mov(eax, Immediate(argc));
+ __ mov(ebx, kind_feedback_cell);
+ __ mov(edi, function);
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1918,16 +1997,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2000,16 +2080,17 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -2084,16 +2165,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -2170,16 +2252,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -2246,16 +2329,17 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2377,16 +2461,17 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2483,7 +2568,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2491,7 +2576,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
@@ -2673,8 +2758,9 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, Handle<String>::cast(name));
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT_FUNCTION);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2752,7 +2838,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
// ----------- S t a t e -------------
@@ -2765,7 +2851,8 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name));
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2935,7 +3022,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<Code> StoreStubCompiler::CompileStoreGlobal(
Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name) {
Label miss;
@@ -2947,7 +3034,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Compute the cell operand to use.
__ mov(scratch1(), Immediate(cell));
Operand cell_operand =
- FieldOperand(scratch1(), JSGlobalPropertyCell::kValueOffset);
+ FieldOperand(scratch1(), PropertyCell::kValueOffset);
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
@@ -3108,7 +3195,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
Label success, miss;
@@ -3119,9 +3206,9 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// Get the value from the cell.
if (Serializer::enabled()) {
__ mov(eax, Immediate(cell));
- __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+ __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
} else {
- __ mov(eax, Operand::Cell(cell));
+ __ mov(eax, Operand::ForCell(cell));
}
// Check for deleted property if property can actually be deleted.
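
These renames reflect a split of the old JSGlobalPropertyCell into two types: a plain Cell, used above for the function cache in GenerateLoadFunctionFromCell, and a PropertyCell for global properties in CompileStoreGlobal/CompileLoadGlobal. A minimal sketch of the relationship the call sites imply, assuming (as v8 does) that PropertyCell derives from Cell; the real layout and offsets differ:

    // Illustrative only: a bare value cell, and a property cell that layers
    // extra per-property bookkeeping on top of the same value slot.
    struct Cell {
      void* value;  // the cached value (e.g. a function)
    };

    struct PropertyCell : Cell {
      int property_details;  // placeholder for the extra per-property state
    };
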
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 94e8773a16..ff3a94d18c 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -144,7 +144,7 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
#endif
fp_ = fp;
- pc_address_ = pc_address;
+ pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
}
@@ -686,7 +686,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
if (holder->IsGlobalObject()) {
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(
+ Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
Handle<JSFunction> function(JSFunction::cast(cell->value()));
@@ -936,15 +936,7 @@ MaybeObject* LoadIC::Load(State state,
}
// Update inline cache and stub cache.
- if (FLAG_use_ic) {
- if (!object->IsJSObject()) {
- // TODO(jkummerow): It would be nice to support non-JSObjects in
- // UpdateCaches, then we wouldn't need to go generic here.
- set_target(*generic_stub());
- } else {
- UpdateCaches(&lookup, state, object, name);
- }
- }
+ if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name);
PropertyAttributes attr;
if (lookup.IsInterceptor() || lookup.IsHandler()) {
@@ -1204,11 +1196,17 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
Handle<Object> object,
Handle<String> name) {
// Bail out if the result is not cacheable.
- if (!lookup->IsCacheable()) return;
+ if (!lookup->IsCacheable()) {
+ set_target(*generic_stub());
+ return;
+ }
- // Loading properties from values is not common, so don't try to
- // deal with non-JS objects here.
- if (!object->IsJSObject()) return;
+ // TODO(jkummerow): It would be nice to support non-JSObjects in
+ // UpdateCaches, then we wouldn't need to go generic here.
+ if (!object->IsJSObject()) {
+ set_target(*generic_stub());
+ return;
+ }
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<Code> code;
@@ -1219,7 +1217,10 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
code = pre_monomorphic_stub();
} else {
code = ComputeLoadHandler(lookup, receiver, name);
- if (code.is_null()) return;
+ if (code.is_null()) {
+ set_target(*generic_stub());
+ return;
+ }
}
PatchCache(state, kNonStrictMode, receiver, name, code);
@@ -1257,7 +1258,7 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
case NORMAL:
if (holder->IsGlobalObject()) {
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(
+ Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
return isolate()->stub_cache()->ComputeLoadGlobal(
name, receiver, global, cell, lookup->IsDontDelete());
@@ -1640,6 +1641,12 @@ MaybeObject* StoreIC::Store(State state,
IsUndeclaredGlobal(object)) {
// Strict mode doesn't allow setting a non-existent global property.
return ReferenceError("not_defined", name);
+ } else if (FLAG_use_ic &&
+ (lookup.IsNormal() ||
+ (lookup.IsField() && lookup.CanHoldValue(value)))) {
+ Handle<Code> stub = strict_mode == kStrictMode
+ ? generic_stub_strict() : generic_stub();
+ set_target(*stub);
}
// Set the property.
@@ -1660,9 +1667,14 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// These are not cacheable, so we never see such LookupResults here.
ASSERT(!lookup->IsHandler());
- Handle<Code> code =
- ComputeStoreMonomorphic(lookup, strict_mode, receiver, name);
- if (code.is_null()) return;
+ Handle<Code> code = ComputeStoreMonomorphic(
+ lookup, strict_mode, receiver, name);
+ if (code.is_null()) {
+ Handle<Code> stub = strict_mode == kStrictMode
+ ? generic_stub_strict() : generic_stub();
+ set_target(*stub);
+ return;
+ }
PatchCache(state, strict_mode, receiver, name, code);
TRACE_IC("StoreIC", name, state, target());
@@ -1684,7 +1696,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- Handle<JSGlobalPropertyCell> cell(
+ Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
return isolate()->stub_cache()->ComputeStoreGlobal(
name, global, cell, strict_mode);
@@ -1733,7 +1745,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
DescriptorArray* target_descriptors = transition->instance_descriptors();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
- if (details.type() != FIELD || details.attributes() != NONE) break;
+ if (details.type() == CALLBACKS || details.attributes() != NONE) break;
return isolate()->stub_cache()->ComputeStoreTransition(
name, receiver, lookup, transition, strict_mode);
@@ -2099,7 +2111,7 @@ Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
DescriptorArray* target_descriptors = transition->instance_descriptors();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
- if (details.type() == FIELD && details.attributes() == NONE) {
+ if (details.type() != CALLBACKS && details.attributes() == NONE) {
return isolate()->stub_cache()->ComputeKeyedStoreTransition(
name, receiver, lookup, transition, strict_mode);
}
@@ -2408,20 +2420,37 @@ const char* UnaryOpIC::GetName(TypeInfo type_info) {
UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
+ return v8::internal::UNINITIALIZED;
case SMI:
case NUMBER:
return MONOMORPHIC;
case GENERIC:
- return ::v8::internal::GENERIC;
+ return v8::internal::GENERIC;
}
UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
+ return v8::internal::UNINITIALIZED;
+}
+
+
+Handle<Type> UnaryOpIC::TypeInfoToType(TypeInfo type_info, Isolate* isolate) {
+ switch (type_info) {
+ case UNINITIALIZED:
+ return handle(Type::None(), isolate);
+ case SMI:
+ return handle(Type::Smi(), isolate);
+ case NUMBER:
+ return handle(Type::Number(), isolate);
+ case GENERIC:
+ return handle(Type::Any(), isolate);
+ }
+ UNREACHABLE();
+ return handle(Type::Any(), isolate);
}
+
UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
- ::v8::internal::TypeInfo operand_type =
- ::v8::internal::TypeInfo::TypeFromValue(operand);
+ v8::internal::TypeInfo operand_type =
+ v8::internal::TypeInfo::FromValue(operand);
if (operand_type.IsSmi()) {
return SMI;
} else if (operand_type.IsNumber()) {
@@ -2489,6 +2518,46 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
}
+Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type,
+ Isolate* isolate) {
+ switch (binary_type) {
+ case UNINITIALIZED:
+ return handle(Type::None(), isolate);
+ case SMI:
+ return handle(Type::Smi(), isolate);
+ case INT32:
+ return handle(Type::Signed32(), isolate);
+ case NUMBER:
+ return handle(Type::Number(), isolate);
+ case ODDBALL:
+ return handle(Type::Optional(
+ handle(Type::Union(
+ handle(Type::Number(), isolate),
+ handle(Type::String(), isolate)), isolate)), isolate);
+ case STRING:
+ return handle(Type::String(), isolate);
+ case GENERIC:
+ return handle(Type::Any(), isolate);
+ }
+ UNREACHABLE();
+ return handle(Type::Any(), isolate);
+}
+
+
+void BinaryOpIC::StubInfoToType(int minor_key,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Isolate* isolate) {
+ TypeInfo left_typeinfo, right_typeinfo, result_typeinfo;
+ BinaryOpStub::decode_types_from_minor_key(
+ minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo);
+ *left = TypeInfoToType(left_typeinfo, isolate);
+ *right = TypeInfoToType(right_typeinfo, isolate);
+ *result = TypeInfoToType(result_typeinfo, isolate);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ASSERT(args.length() == 4);
@@ -2545,8 +2614,7 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
Token::Value op) {
- ::v8::internal::TypeInfo type =
- ::v8::internal::TypeInfo::TypeFromValue(value);
+ v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
if (type.IsSmi()) return BinaryOpIC::SMI;
if (type.IsInteger32()) {
if (kSmiValueSize == 32) return BinaryOpIC::SMI;
@@ -2585,11 +2653,10 @@ static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
#ifdef DEBUG
static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
BinaryOpIC::TypeInfo right,
- bool has_fixed_right_arg,
- int32_t fixed_right_arg_value,
+ Maybe<int32_t> fixed_right_arg,
BinaryOpIC::TypeInfo result) {
PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
- if (has_fixed_right_arg) PrintF("{%d}", fixed_right_arg_value);
+ if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
PrintF("->%s", BinaryOpIC::GetName(result));
}
#endif
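
The (bool, int32_t) pair is folded into a single Maybe<int32_t>. A minimal sketch of the struct these call sites assume, with the field names (has_value, value) and the (bool, T) constructor taken from the usage in the hunks below; v8's actual definition lives in its headers and may differ:

    // Presence-plus-value pair consistent with the call sites:
    //   Maybe<int32_t> m(has, v);  if (m.has_value) use(m.value);
    template <class T>
    struct Maybe {
      Maybe() : has_value(false), value() {}
      Maybe(bool has, T v) : has_value(has), value(v) {}
      bool has_value;
      T value;
    };
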
@@ -2621,10 +2688,8 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
- bool previous_has_fixed_right_arg =
- BinaryOpStub::decode_has_fixed_right_arg_from_minor_key(key);
- int previous_fixed_right_arg_value =
- BinaryOpStub::decode_fixed_right_arg_value_from_minor_key(key);
+ Maybe<int> previous_fixed_right_arg =
+ BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
int32_t value;
bool new_has_fixed_right_arg =
@@ -2632,11 +2697,12 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
right->ToInt32(&value) &&
BinaryOpStub::can_encode_arg_value(value) &&
(previous_overall == BinaryOpIC::UNINITIALIZED ||
- (previous_has_fixed_right_arg &&
- previous_fixed_right_arg_value == value));
- int32_t new_fixed_right_arg_value = new_has_fixed_right_arg ? value : 1;
+ (previous_fixed_right_arg.has_value &&
+ previous_fixed_right_arg.value == value));
+ Maybe<int32_t> new_fixed_right_arg(
+ new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
- if (previous_has_fixed_right_arg == new_has_fixed_right_arg) {
+ if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
if (op == Token::DIV ||
op == Token::MUL ||
@@ -2660,8 +2726,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
}
}
- BinaryOpStub stub(key, new_left, new_right, result_type,
- new_has_fixed_right_arg, new_fixed_right_arg_value);
+ BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
Handle<Code> code = stub.GetCode(isolate);
if (!code.is_null()) {
#ifdef DEBUG
@@ -2669,11 +2734,10 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
PrintF("[BinaryOpIC in ");
JavaScriptFrame::PrintTop(isolate, stdout, false, true);
PrintF(" ");
- TraceBinaryOp(previous_left, previous_right, previous_has_fixed_right_arg,
- previous_fixed_right_arg_value, previous_result);
+ TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
+ previous_result);
PrintF(" => ");
- TraceBinaryOp(new_left, new_right, new_has_fixed_right_arg,
- new_fixed_right_arg_value, result_type);
+ TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
}
#endif
@@ -2767,52 +2831,96 @@ const char* CompareIC::GetStateName(State state) {
case OBJECT: return "OBJECT";
case KNOWN_OBJECT: return "KNOWN_OBJECT";
case GENERIC: return "GENERIC";
- default:
- UNREACHABLE();
- return NULL;
}
+ UNREACHABLE();
+ return NULL;
}
-static CompareIC::State InputState(CompareIC::State old_state,
- Handle<Object> value) {
- switch (old_state) {
+Handle<Type> CompareIC::StateToType(
+ Isolate* isolate,
+ CompareIC::State state,
+ Handle<Map> map) {
+ switch (state) {
case CompareIC::UNINITIALIZED:
- if (value->IsSmi()) return CompareIC::SMI;
- if (value->IsHeapNumber()) return CompareIC::NUMBER;
- if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING;
- if (value->IsString()) return CompareIC::STRING;
- if (value->IsSymbol()) return CompareIC::UNIQUE_NAME;
- if (value->IsJSObject()) return CompareIC::OBJECT;
- break;
+ return handle(Type::None(), isolate);
case CompareIC::SMI:
- if (value->IsSmi()) return CompareIC::SMI;
- if (value->IsHeapNumber()) return CompareIC::NUMBER;
- break;
+ return handle(Type::Smi(), isolate);
case CompareIC::NUMBER:
- if (value->IsNumber()) return CompareIC::NUMBER;
- break;
+ return handle(Type::Number(), isolate);
+ case CompareIC::STRING:
+ return handle(Type::String(), isolate);
case CompareIC::INTERNALIZED_STRING:
- if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING;
- if (value->IsString()) return CompareIC::STRING;
- if (value->IsSymbol()) return CompareIC::UNIQUE_NAME;
+ return handle(Type::InternalizedString(), isolate);
+ case CompareIC::UNIQUE_NAME:
+ return handle(Type::UniqueName(), isolate);
+ case CompareIC::OBJECT:
+ return handle(Type::Receiver(), isolate);
+ case CompareIC::KNOWN_OBJECT:
+ return handle(
+ map.is_null() ? Type::Receiver() : Type::Class(map), isolate);
+ case CompareIC::GENERIC:
+ return handle(Type::Any(), isolate);
+ }
+ UNREACHABLE();
+ return Handle<Type>();
+}
+
+
+void CompareIC::StubInfoToType(int stub_minor_key,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* overall_type,
+ Handle<Map> map,
+ Isolate* isolate) {
+ State left_state, right_state, handler_state;
+ ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
+ &handler_state, NULL);
+ *left_type = StateToType(isolate, left_state);
+ *right_type = StateToType(isolate, right_state);
+ *overall_type = StateToType(isolate, handler_state, map);
+}
+
+
+CompareIC::State CompareIC::NewInputState(State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
+ if (value->IsJSObject()) return OBJECT;
break;
- case CompareIC::STRING:
- if (value->IsString()) return CompareIC::STRING;
+ case SMI:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
break;
- case CompareIC::UNIQUE_NAME:
- if (value->IsUniqueName()) return CompareIC::UNIQUE_NAME;
+ case NUMBER:
+ if (value->IsNumber()) return NUMBER;
break;
- case CompareIC::OBJECT:
- if (value->IsJSObject()) return CompareIC::OBJECT;
+ case INTERNALIZED_STRING:
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
break;
- case CompareIC::GENERIC:
+ case STRING:
+ if (value->IsString()) return STRING;
break;
- case CompareIC::KNOWN_OBJECT:
+ case UNIQUE_NAME:
+ if (value->IsUniqueName()) return UNIQUE_NAME;
+ break;
+ case OBJECT:
+ if (value->IsJSObject()) return OBJECT;
+ break;
+ case GENERIC:
+ break;
+ case KNOWN_OBJECT:
UNREACHABLE();
break;
}
- return CompareIC::GENERIC;
+ return GENERIC;
}
@@ -2885,8 +2993,8 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
State previous_left, previous_right, previous_state;
ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
&previous_right, &previous_state, NULL);
- State new_left = InputState(previous_left, x);
- State new_right = InputState(previous_right, y);
+ State new_left = NewInputState(previous_left, x);
+ State new_right = NewInputState(previous_right, y);
State state = TargetState(previous_state, previous_left, previous_right,
HasInlinedSmiCode(address()), x, y);
ICCompareStub stub(op_, new_left, new_right, state);
@@ -2934,7 +3042,7 @@ void CompareNilIC::Clear(Address address, Code* target) {
Code::ExtraICState state = target->extended_extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
- stub.ClearTypes();
+ stub.ClearState();
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code, target->GetIsolate()));
@@ -2961,9 +3069,9 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
// types must be supported as a result of the miss.
bool already_monomorphic = stub.IsMonomorphic();
- CompareNilICStub::Types old_types = stub.GetTypes();
+ CompareNilICStub::State old_state = stub.GetState();
stub.Record(object);
- old_types.TraceTransition(stub.GetTypes());
+ old_state.TraceTransition(stub.GetState());
NilValue nil = stub.GetNilValue();
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 8c448eb7fd..829c6b1547 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -511,8 +511,8 @@ class StoreIC: public IC {
static void GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode);
static void GenerateNormal(MacroAssembler* masm);
- static void GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* Store(
State state,
@@ -532,6 +532,12 @@ class StoreIC: public IC {
virtual Handle<Code> megamorphic_stub_strict() {
return isolate()->builtins()->StoreIC_Megamorphic_Strict();
}
+ virtual Handle<Code> generic_stub() const {
+ return isolate()->builtins()->StoreIC_Generic();
+ }
+ virtual Handle<Code> generic_stub_strict() const {
+ return isolate()->builtins()->StoreIC_Generic_Strict();
+ }
virtual Handle<Code> global_proxy_stub() {
return isolate()->builtins()->StoreIC_GlobalProxy();
}
@@ -684,6 +690,8 @@ class UnaryOpIC: public IC {
GENERIC
};
+ static Handle<Type> TypeInfoToType(TypeInfo info, Isolate* isolate);
+
explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
@@ -711,6 +719,12 @@ class BinaryOpIC: public IC {
GENERIC
};
+ static void StubInfoToType(int minor_key,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Isolate* isolate);
+
explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
@@ -718,6 +732,9 @@ class BinaryOpIC: public IC {
static const char* GetName(TypeInfo type_info);
static State ToState(TypeInfo type_info);
+
+ private:
+ static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate);
};
@@ -741,6 +758,19 @@ class CompareIC: public IC {
GENERIC
};
+ static State NewInputState(State old_state, Handle<Object> value);
+
+ static Handle<Type> StateToType(Isolate* isolate,
+ State state,
+ Handle<Map> map = Handle<Map>());
+
+ static void StubInfoToType(int stub_minor_key,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* overall_type,
+ Handle<Map> map,
+ Isolate* isolate);
+
CompareIC(Isolate* isolate, Token::Value op)
: IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index e19d6e28f6..80dc8eaca0 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -394,6 +394,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
} else if (chunk->owner()->identity() == CELL_SPACE ||
+ chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
chunk->scan_on_scavenge()) {
chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
@@ -440,6 +441,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
@@ -474,6 +476,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
ActivateIncrementalWriteBarrier(heap_->old_data_space());
ActivateIncrementalWriteBarrier(heap_->cell_space());
+ ActivateIncrementalWriteBarrier(heap_->property_cell_space());
ActivateIncrementalWriteBarrier(heap_->map_space());
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 7cce14aa23..6a8758026a 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -29,10 +29,12 @@
#include "v8.h"
+#include "allocation-inl.h"
#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "heap-profiler.h"
@@ -45,6 +47,7 @@
#include "platform.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
+#include "sampler.h"
#include "scopeinfo.h"
#include "serialize.h"
#include "simulator.h"
@@ -107,6 +110,7 @@ void ThreadLocalTop::InitializeInternal() {
// is complete.
pending_exception_ = NULL;
has_pending_message_ = false;
+ rethrowing_message_ = false;
pending_message_obj_ = NULL;
pending_message_script_ = NULL;
scheduled_exception_ = NULL;
@@ -116,7 +120,7 @@ void ThreadLocalTop::InitializeInternal() {
void ThreadLocalTop::Initialize() {
InitializeInternal();
#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
simulator_ = Simulator::current(isolate_);
#elif V8_TARGET_ARCH_MIPS
simulator_ = Simulator::current(isolate_);
@@ -486,7 +490,8 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
block != NULL;
block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_obj_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_script_)));
}
// Iterate over pointers on native execution stack.
@@ -1162,6 +1167,22 @@ void Isolate::ScheduleThrow(Object* exception) {
}
+void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
+ ASSERT(handler == try_catch_handler());
+ ASSERT(handler->HasCaught());
+ ASSERT(handler->rethrow_);
+ ASSERT(handler->capture_message_);
+ Object* message = reinterpret_cast<Object*>(handler->message_obj_);
+ Object* script = reinterpret_cast<Object*>(handler->message_script_);
+ ASSERT(message->IsJSMessageObject() || message->IsTheHole());
+ ASSERT(script->IsScript() || script->IsTheHole());
+ thread_local_top()->pending_message_obj_ = message;
+ thread_local_top()->pending_message_script_ = script;
+ thread_local_top()->pending_message_start_pos_ = handler->message_start_pos_;
+ thread_local_top()->pending_message_end_pos_ = handler->message_end_pos_;
+}
+
+
Failure* Isolate::PromoteScheduledException() {
MaybeObject* thrown = scheduled_exception();
clear_scheduled_exception();
@@ -1280,9 +1301,12 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
bool report_exception = catchable_by_javascript && should_report_exception;
bool try_catch_needs_message =
- can_be_caught_externally && try_catch_handler()->capture_message_;
+ can_be_caught_externally && try_catch_handler()->capture_message_ &&
+ !thread_local_top()->rethrowing_message_;
bool bootstrapping = bootstrapper()->IsActive();
+ thread_local_top()->rethrowing_message_ = false;
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger of exception.
if (catchable_by_javascript) {
@@ -1464,8 +1488,9 @@ void Isolate::ReportPendingMessages() {
HandleScope scope(this);
Handle<Object> message_obj(thread_local_top_.pending_message_obj_,
this);
- if (thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
+ if (!thread_local_top_.pending_message_script_->IsTheHole()) {
+ Handle<Script> script(
+ Script::cast(thread_local_top_.pending_message_script_));
int start_pos = thread_local_top_.pending_message_start_pos_;
int end_pos = thread_local_top_.pending_message_end_pos_;
MessageLocation location(script, start_pos, end_pos);
@@ -1487,8 +1512,9 @@ MessageLocation Isolate::GetMessageLocation() {
thread_local_top_.pending_exception_ != heap()->termination_exception() &&
thread_local_top_.has_pending_message_ &&
!thread_local_top_.pending_message_obj_->IsTheHole() &&
- thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
+ !thread_local_top_.pending_message_script_->IsTheHole()) {
+ Handle<Script> script(
+ Script::cast(thread_local_top_.pending_message_script_));
int start_pos = thread_local_top_.pending_message_start_pos_;
int end_pos = thread_local_top_.pending_message_end_pos_;
return MessageLocation(script, start_pos, end_pos);
@@ -1625,7 +1651,7 @@ char* Isolate::RestoreThread(char* from) {
// This might be just paranoia, but it seems to be needed in case a
// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
thread_local_top()->simulator_ = Simulator::current(this);
#elif V8_TARGET_ARCH_MIPS
thread_local_top()->simulator_ = Simulator::current(this);
@@ -1754,8 +1780,10 @@ Isolate::Isolate()
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
+ initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
+ function_entry_hook_(NULL),
deferred_handles_head_(NULL),
optimizing_compiler_thread_(this),
marking_thread_(NULL),
@@ -1775,8 +1803,8 @@ Isolate::Isolate()
thread_manager_ = new ThreadManager();
thread_manager_->isolate_ = this;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
simulator_redirection_ = NULL;
@@ -1935,9 +1963,18 @@ void Isolate::SetIsolateThreadLocals(Isolate* isolate,
Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
- // Has to be called while counters_ are still alive.
+ // Has to be called while counters_ are still alive
runtime_zone_.DeleteKeptSegment();
+ // The entry stack must be empty when we get here,
+ // except for the default isolate, where it can
+ // still contain up to one entry stack item.
+ ASSERT(entry_stack_ == NULL || this == default_isolate_);
+ ASSERT(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
+
+ delete entry_stack_;
+ entry_stack_ = NULL;
+
delete[] assembler_spare_buffer_;
assembler_spare_buffer_ = NULL;
@@ -2004,9 +2041,15 @@ Isolate::~Isolate() {
delete global_handles_;
global_handles_ = NULL;
+ delete string_stream_debug_object_cache_;
+ string_stream_debug_object_cache_ = NULL;
+
delete external_reference_table_;
external_reference_table_ = NULL;
+ delete callback_table_;
+ callback_table_ = NULL;
+
#ifdef ENABLE_DEBUGGER_SUPPORT
delete debugger_;
debugger_ = NULL;
@@ -2038,15 +2081,24 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
try_catch_handler()->has_terminated_ = true;
try_catch_handler()->exception_ = heap()->null_value();
} else {
+ v8::TryCatch* handler = try_catch_handler();
// At this point all non-object (failure) exceptions have
// been dealt with so this shouldn't fail.
ASSERT(!pending_exception()->IsFailure());
- try_catch_handler()->can_continue_ = true;
- try_catch_handler()->has_terminated_ = false;
- try_catch_handler()->exception_ = pending_exception();
- if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
- }
+ ASSERT(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
+ thread_local_top_.pending_message_obj_->IsTheHole());
+ ASSERT(thread_local_top_.pending_message_script_->IsScript() ||
+ thread_local_top_.pending_message_script_->IsTheHole());
+ handler->can_continue_ = true;
+ handler->has_terminated_ = false;
+ handler->exception_ = pending_exception();
+ // Propagate to the external try-catch only if we got an actual message.
+ if (thread_local_top_.pending_message_obj_->IsTheHole()) return;
+
+ handler->message_obj_ = thread_local_top_.pending_message_obj_;
+ handler->message_script_ = thread_local_top_.pending_message_script_;
+ handler->message_start_pos_ = thread_local_top_.pending_message_start_pos_;
+ handler->message_end_pos_ = thread_local_top_.pending_message_end_pos_;
}
}
@@ -2078,6 +2130,14 @@ bool Isolate::Init(Deserializer* des) {
ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
+ if (function_entry_hook() != NULL) {
+ // When function entry hooking is in effect, we have to create the code
+ // stubs from scratch to get entry hooks, rather than loading the previously
+ // generated stubs from disk.
+ // If this assert fires, the initialization path has regressed.
+ ASSERT(des == NULL);
+ }
+
// The initialization process does not handle memory exhaustion.
DisallowAllocationFailure disallow_allocation_failure;
@@ -2096,7 +2156,7 @@ bool Isolate::Init(Deserializer* des) {
isolate_addresses_[Isolate::k##CamelName##Address] = \
reinterpret_cast<Address>(hacker_name##_address());
FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
-#undef C
+#undef ASSIGN_ELEMENT
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
@@ -2111,7 +2171,7 @@ bool Isolate::Init(Deserializer* des) {
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this, runtime_zone());
+ stub_cache_ = new StubCache(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
@@ -2125,7 +2185,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
@@ -2213,8 +2273,6 @@ bool Isolate::Init(Deserializer* des) {
LOG(this, LogCompiledFunctions());
}
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
- Internals::kIsolateStateOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -2256,47 +2314,24 @@ bool Isolate::Init(Deserializer* des) {
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
- if (FLAG_parallel_marking && FLAG_marking_threads == 0) {
- FLAG_marking_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_MARKING);
- }
if (FLAG_marking_threads > 0) {
marking_thread_ = new MarkingThread*[FLAG_marking_threads];
for (int i = 0; i < FLAG_marking_threads; i++) {
marking_thread_[i] = new MarkingThread(this);
marking_thread_[i]->Start();
}
- } else {
- FLAG_parallel_marking = false;
- }
-
- if (FLAG_sweeper_threads == 0) {
- if (FLAG_concurrent_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_SWEEPING);
- } else if (FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_SWEEPING);
- }
}
+
if (FLAG_sweeper_threads > 0) {
sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
for (int i = 0; i < FLAG_sweeper_threads; i++) {
sweeper_thread_[i] = new SweeperThread(this);
sweeper_thread_[i]->Start();
}
- } else {
- FLAG_concurrent_sweeping = false;
- FLAG_parallel_sweeping = false;
- }
- if (FLAG_parallel_recompilation &&
- SystemThreadManager::NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
- FLAG_parallel_recompilation = false;
}
+
+ initialized_from_snapshot_ = (des != NULL);
+
return true;
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 76a5a41e70..a0aecd8b27 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -101,8 +101,8 @@ class Debugger;
class DebuggerAgent;
#endif
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
#endif
@@ -246,8 +246,9 @@ class ThreadLocalTop BASE_EMBEDDED {
ThreadId thread_id_;
MaybeObject* pending_exception_;
bool has_pending_message_;
+ bool rethrowing_message_;
Object* pending_message_obj_;
- Script* pending_message_script_;
+ Object* pending_message_script_;
int pending_message_start_pos_;
int pending_message_end_pos_;
// Use a separate value for scheduled exceptions to preserve the
@@ -263,7 +264,7 @@ class ThreadLocalTop BASE_EMBEDDED {
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
#endif // USE_SIMULATOR
@@ -368,8 +369,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- /* SafeStackFrameIterator activations count. */ \
- V(int, safe_stack_iterator_counter, 0) \
V(bool, observer_delivery_pending, false) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
@@ -393,8 +392,8 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
next_(NULL),
@@ -406,8 +405,8 @@ class Isolate {
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
@@ -424,8 +423,8 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -549,7 +548,7 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() {return thread_local_top_.save_context_; }
+ SaveContext* save_context() { return thread_local_top_.save_context_; }
void set_save_context(SaveContext* save) {
thread_local_top_.save_context_ = save;
}
@@ -584,7 +583,7 @@ class Isolate {
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- thread_local_top_.pending_message_script_ = NULL;
+ thread_local_top_.pending_message_script_ = heap_.the_hole_value();
}
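
pending_message_script_ is now a plain tagged Object* cleared to the-hole rather than a Script* cleared to NULL, so the slot always holds a valid heap object that Iterate() can visit unconditionally (compare the VisitPointer change in isolate.cc above). A sketch of the sentinel idiom under those assumptions (illustrative types, not v8's):

    // One always-valid slot with a shared sentinel object for "empty",
    // instead of a raw NULL that a GC visitor would have to special-case.
    struct Slot {
      const void* hole;   // shared "no value" sentinel
      const void* value;
      explicit Slot(const void* h) : hole(h), value(h) {}
      bool has_value() const { return value != hole; }
      void clear() { value = hole; }
    };
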
v8::TryCatch* try_catch_handler() {
return thread_local_top_.TryCatchHandler();
@@ -762,6 +761,9 @@ class Isolate {
// originally.
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
+ // Restore the pending message, script and positions reported to the TryCatch
+ // back to the TLS for reuse when rethrowing.
+ void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
void ReportPendingMessages();
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
@@ -997,8 +999,8 @@ class Isolate {
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
@@ -1053,6 +1055,8 @@ class Isolate {
context_exit_happened_ = context_exit_happened;
}
+ bool initialized_from_snapshot() { return initialized_from_snapshot_; }
+
double time_millis_since_init() {
return OS::TimeCurrentMillis() - time_millis_at_init_;
}
@@ -1107,14 +1111,19 @@ class Isolate {
callback_table_ = callback_table;
}
+ int id() const { return static_cast<int>(id_); }
+
HStatistics* GetHStatistics();
HTracer* GetHTracer();
+ FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
+ void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
+ function_entry_hook_ = function_entry_hook;
+ }
+
private:
Isolate();
- int id() const { return static_cast<int>(id_); }
-
friend struct GlobalState;
friend struct InitializeGlobalState;
@@ -1292,11 +1301,14 @@ class Isolate {
// that a context was recently exited.
bool context_exit_happened_;
+ // True if this isolate was initialized from a snapshot.
+ bool initialized_from_snapshot_;
+
// Time stamp at initialization.
double time_millis_at_init_;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;
@@ -1315,6 +1327,7 @@ class Isolate {
#endif
CpuProfiler* cpu_profiler_;
HeapProfiler* heap_profiler_;
+ FunctionEntryHook function_entry_hook_;
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
type name##_;
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 152bd63716..72c69100d1 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -43,15 +43,33 @@ namespace internal {
template <bool seq_ascii>
class JsonParser BASE_EMBEDDED {
public:
- static Handle<Object> Parse(Handle<String> source, Zone* zone) {
- return JsonParser().ParseJson(source, zone);
+ static Handle<Object> Parse(Handle<String> source) {
+ return JsonParser(source).ParseJson();
}
static const int kEndOfString = -1;
private:
+ explicit JsonParser(Handle<String> source)
+ : source_(source),
+ source_length_(source->length()),
+ isolate_(source->map()->GetHeap()->isolate()),
+ factory_(isolate_->factory()),
+ zone_(isolate_),
+ object_constructor_(isolate_->native_context()->object_function(),
+ isolate_),
+ position_(-1) {
+ FlattenString(source_);
+ pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
+
+ // Optimized fast case where we only have ASCII characters.
+ if (seq_ascii) {
+ seq_source_ = Handle<SeqOneByteString>::cast(source_);
+ }
+ }
+
// Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> source, Zone* zone);
+ Handle<Object> ParseJson();
inline void Advance() {
position_++;
@@ -179,13 +197,14 @@ class JsonParser BASE_EMBEDDED {
inline Isolate* isolate() { return isolate_; }
inline Factory* factory() { return factory_; }
inline Handle<JSFunction> object_constructor() { return object_constructor_; }
- inline Zone* zone() const { return zone_; }
static const int kInitialSpecialStringLength = 1024;
static const int kPretenureTreshold = 100 * 1024;
private:
+ Zone* zone() { return &zone_; }
+
Handle<String> source_;
int source_length_;
Handle<SeqOneByteString> seq_source_;
@@ -193,32 +212,14 @@ class JsonParser BASE_EMBEDDED {
PretenureFlag pretenure_;
Isolate* isolate_;
Factory* factory_;
+ Zone zone_;
Handle<JSFunction> object_constructor_;
uc32 c0_;
int position_;
- Zone* zone_;
};
template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
- Zone* zone) {
- isolate_ = source->map()->GetHeap()->isolate();
- factory_ = isolate_->factory();
- object_constructor_ = Handle<JSFunction>(
- isolate()->native_context()->object_function(), isolate());
- zone_ = zone;
- FlattenString(source);
- source_ = source;
- source_length_ = source_->length();
- pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
-
- // Optimized fast case where we only have ASCII characters.
- if (seq_ascii) {
- seq_source_ = Handle<SeqOneByteString>::cast(source_);
- }
-
- // Set initial position right before the string.
- position_ = -1;
+Handle<Object> JsonParser<seq_ascii>::ParseJson() {
// Advance to the first character (possibly EOS)
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
@@ -264,7 +265,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
break;
}
- MessageLocation location(factory->NewScript(source),
+ MessageLocation location(factory->NewScript(source_),
position_,
position_ + 1);
Handle<Object> result = factory->NewSyntaxError(message, array);
@@ -323,7 +324,6 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Handle<JSObject> json_object =
factory()->NewJSObject(object_constructor(), pretenure_);
Handle<Map> map(json_object->map());
- ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
ZoneList<Handle<Object> > properties(8, zone());
ASSERT_EQ(c0_, '{');
@@ -469,7 +469,6 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
HandleScope scope(isolate());
- ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
ZoneList<Handle<Object> > elements(4, zone());
ASSERT_EQ(c0_, '[');
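
The parser now owns its Zone as a member and does all setup in the constructor, so the ZoneScope plumbing above disappears and call sites shrink to a single argument. A hypothetical call site under that assumption (the helper name is invented; Parse<true> selects the seq_ascii fast path shown in the hunk):

    // Hypothetical caller: the parser allocates and releases its own Zone,
    // so nothing zone-related is threaded through anymore.
    Handle<Object> ParseJsonString(Handle<String> source) {
      return JsonParser<true>::Parse(source);  // ASCII fast path
    }
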
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 7838c04a9e..5da73985de 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -168,10 +168,9 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flag_str,
- Zone* zone) {
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+ Handle<String> flag_str) {
Isolate* isolate = re->GetIsolate();
+ Zone zone(isolate);
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
CompilationCache* compilation_cache = isolate->compilation_cache();
Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
@@ -188,7 +187,7 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result, zone)) {
+ &parse_result, &zone)) {
// Throw an exception if we fail to parse the pattern.
ThrowRegExpException(re,
pattern,
@@ -410,7 +409,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
bool is_ascii) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
+ Zone zone(isolate);
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -441,10 +440,9 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
if (!pattern->IsFlat()) FlattenString(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
- Zone* zone = isolate->runtime_zone();
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&compile_data,
- zone)) {
+ &zone)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
ThrowRegExpException(re,
@@ -461,7 +459,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
pattern,
sample_subject,
is_ascii,
- zone);
+ &zone);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message =
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 181a1b26b1..528a9a2f46 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -71,8 +71,7 @@ class RegExpImpl {
// Returns false if compilation fails.
static Handle<Object> Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flags,
- Zone* zone);
+ Handle<String> flags);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
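
Both regexp entry points follow the same move as the JSON parser: instead of borrowing the isolate's runtime zone (plus a ZoneScope to empty it), each call constructs a function-local Zone whose memory dies with the scope. A standalone stand-in for the idiom, assuming only the scope-bound-arena behavior the hunks rely on:

    #include <cstdlib>
    #include <vector>

    // Arena whose allocations are all freed when it leaves scope.
    class Arena {
     public:
      ~Arena() {
        for (std::size_t i = 0; i < blocks_.size(); ++i) std::free(blocks_[i]);
      }
      void* Alloc(std::size_t n) {
        blocks_.push_back(std::malloc(n));
        return blocks_.back();
      }
     private:
      std::vector<void*> blocks_;
    };

    void CompileOnce() {
      Arena zone;  // ~ Zone zone(isolate);
      int* scratch = static_cast<int*>(zone.Alloc(sizeof(int)));
      *scratch = 42;  // callees allocate into the arena via the pointer
    }  // all arena memory released here
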
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index a6d053aa72..8cca19b2ef 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -148,14 +148,13 @@ void UseIterator::Advance() {
void LAllocator::SetLiveRangeAssignedRegister(
LiveRange* range,
int reg,
- RegisterKind register_kind,
- Zone* zone) {
+ RegisterKind register_kind) {
if (register_kind == DOUBLE_REGISTERS) {
assigned_double_registers_->Add(reg);
} else {
assigned_registers_->Add(reg);
}
- range->set_assigned_register(reg, register_kind, zone);
+ range->set_assigned_register(reg, register_kind, chunk()->zone());
}
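
The allocator refactor below separates two lifetimes: LAllocator::zone() for scratch data the allocator itself consumes (liveness bitvectors, use intervals), and chunk()->zone() for anything the generated LChunk must keep (parallel moves, pointer maps, live-range registers). A standalone stand-in for the split (illustrative; the real types are v8's):

    #include <vector>

    struct Arena { std::vector<int> data; };

    struct Allocator {
      Arena scratch;  // ~ LAllocator::zone(): dies with the allocator
      Arena* chunk;   // ~ chunk()->zone(): owned by the chunk, outlives us
      explicit Allocator(Arena* chunk_zone) : chunk(chunk_zone) {}

      void Run() {
        scratch.data.push_back(1);  // temporary liveness bookkeeping
        chunk->data.push_back(2);   // results codegen reads after we're gone
      }
    };
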
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 1fd921f191..2e2f802558 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -541,16 +541,16 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
LAllocator::LAllocator(int num_values, HGraph* graph)
- : zone_(graph->zone()),
+ : zone_(graph->isolate()),
chunk_(NULL),
- live_in_sets_(graph->blocks()->length(), zone_),
- live_ranges_(num_values * 2, zone_),
+ live_in_sets_(graph->blocks()->length(), zone()),
+ live_ranges_(num_values * 2, zone()),
fixed_live_ranges_(NULL),
fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2, zone_),
- active_live_ranges_(8, zone_),
- inactive_live_ranges_(8, zone_),
- reusable_slots_(8, zone_),
+ unhandled_live_ranges_(num_values * 2, zone()),
+ active_live_ranges_(8, zone()),
+ inactive_live_ranges_(8, zone()),
+ reusable_slots_(8, zone()),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
mode_(GENERAL_REGISTERS),
@@ -571,7 +571,7 @@ void LAllocator::InitializeLivenessAnalysis() {
BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
// Compute live out for the given block, except not including backward
// successor edges.
- BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);
+ BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
// Process all successor blocks.
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
@@ -609,7 +609,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end, zone_);
+ range->AddUseInterval(start, end, zone());
iterator.Advance();
}
}
@@ -640,7 +640,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
TraceAlloc("Fixed reg is tagged at %d\n", pos);
LInstruction* instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand, zone());
+ instr->pointer_map()->RecordPointer(operand, chunk()->zone());
}
}
return operand;
@@ -651,9 +651,9 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
ASSERT(index < Register::kMaxNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
- result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
+ result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS, zone_);
+ SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS);
fixed_live_ranges_[index] = result;
}
return result;
@@ -664,9 +664,10 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
ASSERT(index < DoubleRegister::NumAllocatableRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
- result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
+ result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
+ chunk()->zone());
ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS, zone_);
+ SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -679,7 +680,7 @@ LiveRange* LAllocator::LiveRangeFor(int index) {
}
LiveRange* result = live_ranges_[index];
if (result == NULL) {
- result = new(zone_) LiveRange(index, zone_);
+ result = new(zone()) LiveRange(index, chunk()->zone());
live_ranges_[index] = result;
}
return result;
@@ -725,15 +726,15 @@ void LAllocator::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start().Value() > position.Value()) {
// Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextInstruction(), zone_);
- range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone_);
+ range->AddUseInterval(position, position.NextInstruction(), zone());
+ range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
} else {
range->ShortenTo(position);
}
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, zone_);
+ range->AddUsePosition(position, unalloc_operand, hint, zone());
}
}
@@ -746,9 +747,9 @@ void LAllocator::Use(LifetimePosition block_start,
if (range == NULL) return;
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, zone_);
+ range->AddUsePosition(position, unalloc_operand, hint, zone());
}
- range->AddUseInterval(block_start, position, zone_);
+ range->AddUseInterval(block_start, position, zone());
}
@@ -756,7 +757,8 @@ void LAllocator::AddConstraintsGapMove(int index,
LOperand* from,
LOperand* to) {
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+ chunk()->zone());
if (from->IsUnallocated()) {
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
@@ -765,13 +767,13 @@ void LAllocator::AddConstraintsGapMove(int index,
if (cur_to->IsUnallocated()) {
if (LUnallocated::cast(cur_to)->virtual_register() ==
LUnallocated::cast(from)->virtual_register()) {
- move->AddMove(cur.source(), to, zone());
+ move->AddMove(cur.source(), to, chunk()->zone());
return;
}
}
}
}
- move->AddMove(from, to, zone());
+ move->AddMove(from, to, chunk()->zone());
}
@@ -811,7 +813,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
LiveRange* range = LiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained(zone());
+ LUnallocated* output_copy = first_output->CopyUnconstrained(
+ chunk()->zone());
bool is_tagged = HasTaggedValue(first_output->virtual_register());
AllocateFixed(first_output, gap_index, is_tagged);
@@ -832,8 +835,10 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
// Thus it should be inserted to a lifetime position corresponding to
// the instruction end.
LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, zone());
- move->AddMove(first_output, range->GetSpillOperand(), zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
+ chunk()->zone());
+ move->AddMove(first_output, range->GetSpillOperand(),
+ chunk()->zone());
}
}
@@ -842,7 +847,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
for (UseIterator it(second); !it.Done(); it.Advance()) {
LUnallocated* cur_input = LUnallocated::cast(it.Current());
if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(
+ chunk()->zone());
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -851,7 +857,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(
+ chunk()->zone());
int vreg = GetVirtualRegister();
if (!AllocationOk()) return;
cur_input->set_virtual_register(vreg);
@@ -860,7 +867,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
DOUBLE_REGISTERS) {
double_artificial_registers_.Add(
cur_input->virtual_register() - first_artificial_register_,
- zone_);
+ zone());
}
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -876,7 +883,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(
+ chunk()->zone());
cur_input->set_virtual_register(second_output->virtual_register());
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -884,7 +892,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int index = gap_index + 1;
LInstruction* instr = InstructionAt(index);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy, zone());
+ instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
}
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
@@ -913,7 +921,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (IsGapAt(index)) {
// We have a gap at this position.
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+ chunk()->zone());
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
@@ -965,7 +974,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
LiveRange* range = FixedLiveRangeFor(i);
range->AddUseInterval(curr_position,
curr_position.InstructionEnd(),
- zone_);
+ zone());
}
}
}
@@ -977,7 +986,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
range->AddUseInterval(curr_position,
curr_position.InstructionEnd(),
- zone_);
+ zone());
}
}
}
@@ -1025,7 +1034,8 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
const ZoneList<HPhi*>* phis = block->phis();
for (int i = 0; i < phis->length(); ++i) {
HPhi* phi = phis->at(i);
- LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
+ LUnallocated* phi_operand =
+ new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
phi_operand->set_virtual_register(phi->id());
for (int j = 0; j < phi->OperandCount(); ++j) {
HValue* op = phi->OperandAt(j);
@@ -1035,7 +1045,8 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
operand = chunk_->DefineConstantOperand(constant);
} else {
ASSERT(!op->EmitAtUses());
- LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
+ LUnallocated* unalloc =
+ new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
unalloc->set_virtual_register(op->id());
operand = unalloc;
}
@@ -1058,17 +1069,17 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
InstructionAt(cur_block->last_instruction_index());
if (branch->HasPointerMap()) {
if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
- branch->pointer_map()->RecordPointer(phi_operand, zone());
+ branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
} else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand, zone());
+ branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());
}
}
}
LiveRange* live_range = LiveRangeFor(phi->id());
LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START, zone())->
- AddMove(phi_operand, live_range->GetSpillOperand(), zone());
+ label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
+ AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
}
}
@@ -1078,12 +1089,11 @@ bool LAllocator::Allocate(LChunk* chunk) {
ASSERT(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
assigned_registers_ =
- new(zone()) BitVector(Register::NumAllocatableRegisters(), zone());
- assigned_registers_->Clear();
+ new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
+ chunk->zone());
assigned_double_registers_ =
- new(zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
- zone());
- assigned_double_registers_->Clear();
+ new(chunk->zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
+ chunk->zone());
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1093,7 +1103,6 @@ bool LAllocator::Allocate(LChunk* chunk) {
AllocateDoubleRegisters();
if (!AllocationOk()) return false;
PopulatePointerMaps();
- if (has_osr_entry_) ProcessOsrEntry();
ConnectRanges();
ResolveControlFlow();
return true;
@@ -1101,7 +1110,7 @@ bool LAllocator::Allocate(LChunk* chunk) {
void LAllocator::MeetRegisterConstraints() {
- HPhase phase("L_Register constraints", chunk_);
+ LAllocatorPhase phase("L_Register constraints", this);
first_artificial_register_ = next_virtual_register_;
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
@@ -1113,7 +1122,7 @@ void LAllocator::MeetRegisterConstraints() {
void LAllocator::ResolvePhis() {
- HPhase phase("L_Resolve phis", chunk_);
+ LAllocatorPhase phase("L_Resolve phis", this);
// Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1149,8 +1158,8 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
if (cur_cover->IsSpilled()) return;
ASSERT(pred_cover != NULL && cur_cover != NULL);
if (pred_cover != cur_cover) {
- LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
- LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
+ LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
+ LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
if (!pred_op->Equals(cur_op)) {
LGap* gap = NULL;
if (block->predecessors()->length() == 1) {
@@ -1170,7 +1179,7 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
LInstruction* branch = InstructionAt(pred->last_instruction_index());
if (branch->HasPointerMap()) {
if (HasTaggedValue(range->id())) {
- branch->pointer_map()->RecordPointer(cur_op, zone());
+ branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
} else if (!cur_op->IsDoubleStackSlot() &&
!cur_op->IsDoubleRegister()) {
branch->pointer_map()->RemovePointer(cur_op);
@@ -1178,7 +1187,8 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
}
}
gap->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(pred_op, cur_op, zone());
+ LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,
+ chunk()->zone());
}
}
}
@@ -1189,11 +1199,11 @@ LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
if (IsGapAt(index)) {
LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END, zone());
+ pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone());
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, zone());
+ (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone());
}
@@ -1204,7 +1214,7 @@ HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
void LAllocator::ConnectRanges() {
- HPhase phase("L_Connect ranges", this);
+ LAllocatorPhase phase("L_Connect ranges", this);
for (int i = 0; i < live_ranges()->length(); ++i) {
LiveRange* first_range = live_ranges()->at(i);
if (first_range == NULL || first_range->parent() != NULL) continue;
@@ -1223,9 +1233,12 @@ void LAllocator::ConnectRanges() {
}
if (should_insert) {
LParallelMove* move = GetConnectingParallelMove(pos);
- LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
- LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
- move->AddMove(prev_operand, cur_operand, zone());
+ LOperand* prev_operand = first_range->CreateAssignedOperand(
+ chunk()->zone());
+ LOperand* cur_operand = second_range->CreateAssignedOperand(
+ chunk()->zone());
+ move->AddMove(prev_operand, cur_operand,
+ chunk()->zone());
}
}
}
@@ -1244,7 +1257,7 @@ bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
void LAllocator::ResolveControlFlow() {
- HPhase phase("L_Resolve control flow", this);
+ LAllocatorPhase phase("L_Resolve control flow", this);
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = 1; block_id < blocks->length(); ++block_id) {
HBasicBlock* block = blocks->at(block_id);
@@ -1265,7 +1278,7 @@ void LAllocator::ResolveControlFlow() {
void LAllocator::BuildLiveRanges() {
- HPhase phase("L_Build live ranges", this);
+ LAllocatorPhase phase("L_Build live ranges", this);
InitializeLivenessAnalysis();
// Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1290,7 +1303,8 @@ void LAllocator::BuildLiveRanges() {
LOperand* hint = NULL;
LOperand* phi_operand = NULL;
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+ chunk()->zone());
for (int j = 0; j < move->move_operands()->length(); ++j) {
LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() &&
@@ -1327,7 +1341,7 @@ void LAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end, zone_);
+ range->EnsureInterval(start, end, zone());
iterator.Advance();
}
@@ -1377,7 +1391,7 @@ bool LAllocator::SafePointsAreInOrder() const {
void LAllocator::PopulatePointerMaps() {
- HPhase phase("L_Populate pointer maps", this);
+ LAllocatorPhase phase("L_Populate pointer maps", this);
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
ASSERT(SafePointsAreInOrder());
@@ -1448,47 +1462,16 @@ void LAllocator::PopulatePointerMaps() {
safe_point >= range->spill_start_index()) {
TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand(), zone());
+ map->RecordPointer(range->GetSpillOperand(), chunk()->zone());
}
if (!cur->IsSpilled()) {
TraceAlloc("Pointer in register for range %d (start at %d) "
"at safe point %d\n",
cur->id(), cur->Start().Value(), safe_point);
- LOperand* operand = cur->CreateAssignedOperand(zone_);
+ LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
ASSERT(!operand->IsStackSlot());
- map->RecordPointer(operand, zone());
- }
- }
- }
-}
-
-
-void LAllocator::ProcessOsrEntry() {
- const ZoneList<LInstruction*>* instrs = chunk_->instructions();
-
- // Linear search for the OSR entry instruction in the chunk.
- int index = -1;
- while (++index < instrs->length() &&
- !instrs->at(index)->IsOsrEntry()) {
- }
- ASSERT(index < instrs->length());
- LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
-
- LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* range = live_ranges()->at(i);
- if (range != NULL) {
- if (range->Covers(position) &&
- range->HasRegisterAssigned() &&
- range->TopLevel()->HasAllocatedSpillOperand()) {
- int reg_index = range->assigned_register();
- LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
- if (range->IsDouble()) {
- instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
- } else {
- instruction->MarkSpilledRegister(reg_index, spill_operand);
- }
+ map->RecordPointer(operand, chunk()->zone());
}
}
}
@@ -1496,14 +1479,14 @@ void LAllocator::ProcessOsrEntry() {
void LAllocator::AllocateGeneralRegisters() {
- HPhase phase("L_Allocate general registers", this);
+ LAllocatorPhase phase("L_Allocate general registers", this);
num_registers_ = Register::NumAllocatableRegisters();
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
- HPhase phase("L_Allocate double registers", this);
+ LAllocatorPhase phase("L_Allocate double registers", this);
num_registers_ = DoubleRegister::NumAllocatableRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
@@ -1829,7 +1812,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
- SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, register_index, mode_);
return true;
}
}
@@ -1864,7 +1847,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, reg, mode_);
return true;
}
@@ -1949,7 +1932,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, reg, mode_);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
@@ -2061,7 +2044,7 @@ LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
int vreg = GetVirtualRegister();
if (!AllocationOk()) return NULL;
LiveRange* result = LiveRangeFor(vreg);
- range->SplitAt(pos, result, zone_);
+ range->SplitAt(pos, result, zone());
return result;
}
@@ -2169,7 +2152,7 @@ void LAllocator::Spill(LiveRange* range) {
if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
first->SetSpillOperand(op);
}
- range->MakeSpilled(zone_);
+ range->MakeSpilled(chunk()->zone());
}
@@ -2192,4 +2175,32 @@ void LAllocator::Verify() const {
#endif
+LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
+ : CompilationPhase(name, allocator->graph()->info()),
+ allocator_(allocator) {
+ if (FLAG_hydrogen_stats) {
+ allocator_zone_start_allocation_size_ =
+ allocator->zone()->allocation_size();
+ }
+}
+
+
+LAllocatorPhase::~LAllocatorPhase() {
+ if (FLAG_hydrogen_stats) {
+ unsigned size = allocator_->zone()->allocation_size() -
+ allocator_zone_start_allocation_size_;
+ isolate()->GetHStatistics()->SaveTiming(name(), 0, size);
+ }
+
+ if (ShouldProduceTraceOutput()) {
+ isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());
+ isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_);
+ }
+
+#ifdef DEBUG
+ if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+
+
} } // namespace v8::internal
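
The lithium-allocator hunks above all apply one rule: bookkeeping that only the register allocator needs is allocated from its own zone(), while anything the finished code still references (operands, parallel moves, pointer-map entries) moves to chunk()->zone(), whose lifetime matches the chunk. A minimal sketch of that lifetime split, not part of the patch; Arena below is a hypothetical stand-in for V8's Zone:

#include <cstddef>
#include <new>
#include <vector>

class Arena {  // hypothetical stand-in for v8::internal::Zone
 public:
  ~Arena() { for (char* b : blocks_) delete[] b; }
  void* Allocate(std::size_t bytes) {
    blocks_.push_back(new char[bytes]);
    return blocks_.back();
  }
 private:
  std::vector<char*> blocks_;
};

struct Operand { int index; };

// Scratch data dies with the local arena when allocation finishes; the
// returned operand must come from the caller's longer-lived arena, which
// is why the patch rewrites escaping allocations to use chunk()->zone().
Operand* AllocateRegisters(Arena* chunk_arena) {
  Arena scratch;                               // like LAllocator's own zone_
  void* live_ranges = scratch.Allocate(4096);  // temporary bookkeeping
  (void)live_ranges;
  return new (chunk_arena->Allocate(sizeof(Operand))) Operand{7};
}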
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 552ebdd1de..e5edd3cf03 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -434,7 +434,7 @@ class LAllocator BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
- Zone* zone() const { return zone_; }
+ Zone* zone() { return &zone_; }
int GetVirtualRegister() {
if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
@@ -474,7 +474,6 @@ class LAllocator BASE_EMBEDDED {
void ConnectRanges();
void ResolveControlFlow();
void PopulatePointerMaps();
- void ProcessOsrEntry();
void AllocateRegisters();
bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
inline bool SafePointsAreInOrder() const;
@@ -571,8 +570,7 @@ class LAllocator BASE_EMBEDDED {
inline void SetLiveRangeAssignedRegister(LiveRange* range,
int reg,
- RegisterKind register_kind,
- Zone* zone);
+ RegisterKind register_kind);
// Return parallel move that should be used to connect ranges split at the
// given position.
@@ -599,7 +597,7 @@ class LAllocator BASE_EMBEDDED {
inline LGap* GapAt(int index);
- Zone* zone_;
+ Zone zone_;
LPlatformChunk* chunk_;
@@ -646,6 +644,19 @@ class LAllocator BASE_EMBEDDED {
};
+class LAllocatorPhase : public CompilationPhase {
+ public:
+ LAllocatorPhase(const char* name, LAllocator* allocator);
+ ~LAllocatorPhase();
+
+ private:
+ LAllocator* allocator_;
+ unsigned allocator_zone_start_allocation_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
+};
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_ALLOCATOR_H_
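
LAllocatorPhase, declared in the header hunk above and defined in lithium-allocator.cc, is a plain RAII scope: the constructor snapshots the zone's allocation counter, and the destructor reports the delta, emits trace output, and verifies the allocator. A self-contained sketch of the pattern, with illustrative names rather than V8's:

#include <cstdio>

struct ZoneStats { unsigned long allocated = 0; };

class PhaseScope {
 public:
  PhaseScope(const char* name, ZoneStats* stats)
      : name_(name), stats_(stats), start_(stats->allocated) {}
  ~PhaseScope() {  // runs on every exit path, like ~LAllocatorPhase
    std::printf("%s: %lu bytes allocated\n",
                name_, stats_->allocated - start_);
  }
 private:
  const char* name_;
  ZoneStats* stats_;
  unsigned long start_;
};

void MeetRegisterConstraints(ZoneStats* stats) {
  PhaseScope phase("L_Register constraints", stats);
  stats->allocated += 1024;  // stands in for zone-allocating work
}  // the per-phase delta is reported here

The same shape explains why the patch replaces the generic HPhase with allocator- and chunk-aware phase classes: the destructor needs typed access to the object it reports on.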
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 2993c9aa73..b22fdf6e28 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -307,7 +307,7 @@ Label* LChunk::GetAssemblyLabel(int block_id) const {
}
void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
+ LPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
@@ -491,4 +491,11 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
}
+LPhase::~LPhase() {
+ if (ShouldProduceTraceOutput()) {
+ isolate()->GetHTracer()->TraceLithium(name(), chunk_);
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 170e5c89bc..1e0784eb98 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -527,13 +527,12 @@ class LEnvironment: public ZoneObject {
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
ast_id_(ast_id),
+ translation_size_(value_count),
parameter_count_(parameter_count),
pc_offset_(-1),
values_(value_count, zone),
is_tagged_(value_count, zone),
is_uint32_(value_count, zone),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
outer_(outer),
entry_(entry),
zone_(zone) { }
@@ -544,15 +543,13 @@ class LEnvironment: public ZoneObject {
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
BailoutId ast_id() const { return ast_id_; }
+ int translation_size() const { return translation_size_; }
int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
- LOperand** spilled_registers() const { return spilled_registers_; }
- LOperand** spilled_double_registers() const {
- return spilled_double_registers_;
- }
const ZoneList<LOperand*>* values() const { return &values_; }
LEnvironment* outer() const { return outer_; }
HEnterInlined* entry() { return entry_; }
+ Zone* zone() const { return zone_; }
void AddValue(LOperand* operand,
Representation representation,
@@ -560,11 +557,11 @@ class LEnvironment: public ZoneObject {
values_.Add(operand, zone());
if (representation.IsSmiOrTagged()) {
ASSERT(!is_uint32);
- is_tagged_.Add(values_.length() - 1);
+ is_tagged_.Add(values_.length() - 1, zone());
}
if (is_uint32) {
- is_uint32_.Add(values_.length() - 1);
+ is_uint32_.Add(values_.length() - 1, zone());
}
}
@@ -588,16 +585,8 @@ class LEnvironment: public ZoneObject {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
}
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
void PrintTo(StringStream* stream);
- Zone* zone() const { return zone_; }
-
private:
Handle<JSFunction> closure_;
FrameType frame_type_;
@@ -605,21 +594,17 @@ class LEnvironment: public ZoneObject {
int deoptimization_index_;
int translation_index_;
BailoutId ast_id_;
+ int translation_size_;
int parameter_count_;
int pc_offset_;
- ZoneList<LOperand*> values_;
- BitVector is_tagged_;
- BitVector is_uint32_;
-
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
+ // Value array: [parameters] [locals] [expression stack] [de-materialized].
+ // |>--------- translation_size ---------<|
+ ZoneList<LOperand*> values_;
+ GrowableBitVector is_tagged_;
+ GrowableBitVector is_uint32_;
LEnvironment* outer_;
HEnterInlined* entry_;
-
Zone* zone_;
};
@@ -774,6 +759,20 @@ enum NumberUntagDMode {
};
+class LPhase : public CompilationPhase {
+ public:
+ LPhase(const char* name, LChunk* chunk)
+ : CompilationPhase(name, chunk->info()),
+ chunk_(chunk) { }
+ ~LPhase();
+
+ private:
+ LChunk* chunk_;
+
+ DISALLOW_COPY_AND_ASSIGN(LPhase);
+};
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
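
The LEnvironment hunk above swaps fixed BitVectors for GrowableBitVectors, which is why is_tagged_.Add() and is_uint32_.Add() now take zone(): growth may reallocate the backing store, and the vector must know which zone to draw from. A compact sketch of the growth logic, with std::vector standing in for zone-backed storage:

#include <cstddef>
#include <cstdint>
#include <vector>

class GrowableBits {
 public:
  // V8's version takes a Zone* here; growing allocates from that zone.
  void Add(int index) {
    std::size_t word = static_cast<std::size_t>(index) / 32;
    if (word >= words_.size()) words_.resize(word + 1, 0);  // grow lazily
    words_[word] |= std::uint32_t{1} << (index % 32);
  }
  bool Contains(int index) const {
    std::size_t word = static_cast<std::size_t>(index) / 32;
    return word < words_.size() && ((words_[word] >> (index % 32)) & 1) != 0;
  }
 private:
  std::vector<std::uint32_t> words_;
};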
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index a01e502300..3ec2da3327 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -1832,11 +1832,11 @@ class MultipleFunctionTarget {
// Drops all call frames matched by the target and all frames above them.
template<typename TARGET>
static const char* DropActivationsInActiveThreadImpl(
- TARGET& target, bool do_drop, Zone* zone) {
+ TARGET& target, bool do_drop) {
Isolate* isolate = Isolate::Current();
Debug* debug = isolate->debug();
- ZoneScope scope(zone, DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap(isolate, zone);
+ Zone zone(isolate);
+ Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
int top_frame_index = -1;
@@ -1928,12 +1928,11 @@ static const char* DropActivationsInActiveThreadImpl(
// Fills result array with statuses of functions. Modifies the stack
// removing all listed functions if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
- Zone* zone) {
+ Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
MultipleFunctionTarget target(shared_info_array, result);
const char* message =
- DropActivationsInActiveThreadImpl(target, do_drop, zone);
+ DropActivationsInActiveThreadImpl(target, do_drop);
if (message) {
return message;
}
@@ -1980,7 +1979,7 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
+ Handle<JSArray> shared_info_array, bool do_drop) {
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
@@ -2006,7 +2005,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// Try to drop activations from the current stack.
const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop, zone);
+ DropActivationsInActiveThread(shared_info_array, result, do_drop);
if (error_message != NULL) {
// Add error message as an array extra element.
Vector<const char> vector_message(error_message, StrLength(error_message));
@@ -2047,10 +2046,10 @@ class SingleFrameTarget {
// Finds and drops the required frame and all frames above.
// Returns error message or NULL.
-const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
+const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
SingleFrameTarget target(frame);
- const char* result = DropActivationsInActiveThreadImpl(target, true, zone);
+ const char* result = DropActivationsInActiveThreadImpl(target, true);
if (result != NULL) {
return result;
}
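
The liveedit hunks delete the Zone* parameter from the whole call chain: instead of callers threading a zone through and wrapping it in ZoneScope(zone, DELETE_ON_EXIT), the implementation now declares Zone zone(isolate) on the stack, so the scratch memory lives exactly as long as the call. A tiny sketch of the same API simplification, with a standard container standing in for the zone-backed stack map:

#include <string>
#include <vector>

// Before: const char* DropActivations(bool do_drop, Scratch* scratch);
// After, the patch's shape:
const char* DropActivations(bool do_drop) {
  std::vector<std::string> stack_map;  // scratch, freed on every return path
  stack_map.push_back("frame #0");
  if (!do_drop) return "Not dropped";  // early return still cleans up
  stack_map.clear();                   // "drop" the matched frames
  return nullptr;                      // NULL signals success, as in LiveEdit
}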
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 5b12854d8c..0efbb95cc0 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -121,11 +121,11 @@ class LiveEdit : AllStatic {
// restarts the lowest found frames and drops all other frames above
// if possible and if do_drop is true.
static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
+ Handle<JSArray> shared_info_array, bool do_drop);
// Restarts the call frame and completely drops all frames above it.
// Returns error message or NULL.
- static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
+ static const char* RestartFrame(JavaScriptFrame* frame);
// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index 8aebbc7dde..7f653cb728 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -29,7 +29,6 @@
#define V8_LOG_INL_H_
#include "log.h"
-#include "cpu-profiler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index f033172734..a733b523ea 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -237,6 +237,18 @@ void LogMessageBuilder::Append(const char c) {
}
+void LogMessageBuilder::AppendDoubleQuotedString(const char* string) {
+ Append('"');
+ for (const char* p = string; *p != '\0'; p++) {
+ if (*p == '"') {
+ Append('\\');
+ }
+ Append(*p);
+ }
+ Append('"');
+}
+
+
void LogMessageBuilder::Append(String* str) {
  DisallowHeapAllocation no_gc;  // Ensure the string stays valid.
int length = str->length();
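
AppendDoubleQuotedString centralizes a quote-escaping loop that, as the log.cc hunks further down show, several CodeCreateEvent variants previously inlined by hand. A standalone equivalent for reference, illustrative only:

#include <string>

std::string DoubleQuoted(const char* s) {
  std::string out(1, '"');
  for (const char* p = s; *p != '\0'; ++p) {
    if (*p == '"') out += '\\';  // escape embedded double quotes only
    out += *p;
  }
  out += '"';
  return out;
}
// e.g. the input   say "hi"   produces   "say \"hi\""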
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index a1867f2582..c4995402ca 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -132,6 +132,9 @@ class LogMessageBuilder BASE_EMBEDDED {
// Append a character to the log message.
void Append(const char c);
+  // Append a double-quoted string to the log message.
+ void AppendDoubleQuotedString(const char* string);
+
// Append a heap string.
void Append(String* str);
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 610a63b37a..e95b96332e 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "log.h"
@@ -435,7 +436,7 @@ void Logger::IssueCodeAddedEvent(Code* code,
event.code_len = code->instruction_size();
Handle<Script> script_handle =
script != NULL ? Handle<Script>(script) : Handle<Script>();
- event.script = v8::Handle<v8::Script>(ToApi<v8::Script>(script_handle));
+ event.script = ToApiHandle<v8::Script>(script_handle);
event.name.str = name;
event.name.len = name_len;
@@ -637,6 +638,16 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
}
+void Logger::CodeDeoptEvent(Code* code) {
+ if (!log_->IsEnabled()) return;
+ ASSERT(FLAG_log_internal_timer_events);
+ LogMessageBuilder msg(this);
+ int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
+ msg.WriteToLogFile();
+}
+
+
void Logger::TimerEvent(StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
@@ -862,7 +873,7 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
Address entry_point) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,-3,",
+ msg.Append("%s,%s,-2,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
@@ -903,18 +914,39 @@ void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- const char* comment) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
+void Logger::AppendName(Name* name) {
+ if (name->IsString()) {
+ name_buffer_->AppendString(String::cast(name));
+ } else {
+ Symbol* symbol = Symbol::cast(name);
+ name_buffer_->AppendBytes("symbol(");
+ if (!symbol->name()->IsUndefined()) {
+ name_buffer_->AppendBytes("\"");
+ name_buffer_->AppendString(String::cast(symbol->name()));
+ name_buffer_->AppendBytes("\" ");
+ }
+ name_buffer_->AppendBytes("hash ");
+ name_buffer_->AppendHex(symbol->Hash());
+ name_buffer_->AppendByte(')');
+ }
+}
+
+
+void Logger::InitNameBuffer(LogEventsAndTags tag) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(comment);
- }
+}
+
+
+void Logger::LogRecordedBuffer(Code* code, SharedFunctionInfo* shared) {
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
+ Script* script = shared && shared->script()->IsScript() ?
+ Script::cast(shared->script()) : NULL;
+ IssueCodeAddedEvent(code,
+ script,
+ name_buffer_->get(),
+ name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -923,21 +955,49 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
if (Serializer::enabled()) {
RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
}
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- for (const char* p = comment; *p != '\0'; p++) {
- if (*p == '"') {
- msg.Append('\\');
+}
+
+
+void Logger::AppendCodeCreateHeader(LogMessageBuilder* msg,
+ LogEventsAndTags tag,
+ Code* code) {
+ ASSERT(msg);
+ msg->Append("%s,%s,%d,",
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[tag],
+ code->kind());
+ msg->AppendAddress(code->address());
+ msg->Append(",%d,", code->ExecutableSize());
+}
+
+
+void Logger::AppendSymbolName(LogMessageBuilder* msg,
+ Symbol* symbol) {
+ ASSERT(symbol);
+ msg->Append("symbol(");
+ if (!symbol->name()->IsUndefined()) {
+ msg->Append("\"");
+ msg->AppendDetailed(String::cast(symbol->name()), false);
+ msg->Append("\" ");
}
- msg.Append(*p);
+ msg->Append("hash %x)", symbol->Hash());
+}
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ const char* comment) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
+ InitNameBuffer(tag);
+ name_buffer_->AppendBytes(comment);
+ LogRecordedBuffer(code, NULL);
}
- msg.Append('"');
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
+ AppendCodeCreateHeader(&msg, tag, code);
+ msg.AppendDoubleQuotedString(comment);
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -948,55 +1008,20 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Name* name) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- if (name->IsString()) {
- name_buffer_->AppendString(String::cast(name));
- } else {
- Symbol* symbol = Symbol::cast(name);
- name_buffer_->AppendBytes("symbol(");
- if (!symbol->name()->IsUndefined()) {
- name_buffer_->AppendBytes("\"");
- name_buffer_->AppendString(String::cast(symbol->name()));
- name_buffer_->AppendBytes("\" ");
- }
- name_buffer_->AppendBytes("hash ");
- name_buffer_->AppendHex(symbol->Hash());
- name_buffer_->AppendByte(')');
- }
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ InitNameBuffer(tag);
+ AppendName(name);
+ LogRecordedBuffer(code, NULL);
}
- if (!FLAG_log_code) return;
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,", code->ExecutableSize());
+ AppendCodeCreateHeader(&msg, tag, code);
if (name->IsString()) {
msg.Append('"');
msg.AppendDetailed(String::cast(name), false);
msg.Append('"');
} else {
- Symbol* symbol = Symbol::cast(name);
- msg.Append("symbol(");
- if (!symbol->name()->IsUndefined()) {
- msg.Append("\"");
- msg.AppendDetailed(String::cast(symbol->name()), false);
- msg.Append("\" ");
- }
- msg.Append("hash %x)", symbol->Hash());
+ AppendSymbolName(&msg, Symbol::cast(name));
}
msg.Append('\n');
msg.WriteToLogFile();
@@ -1020,65 +1045,25 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Name* name) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
+ InitNameBuffer(tag);
name_buffer_->AppendBytes(ComputeMarker(code));
- if (name->IsString()) {
- name_buffer_->AppendString(String::cast(name));
- } else {
- Symbol* symbol = Symbol::cast(name);
- name_buffer_->AppendBytes("symbol(");
- if (!symbol->name()->IsUndefined()) {
- name_buffer_->AppendBytes("\"");
- name_buffer_->AppendString(String::cast(symbol->name()));
- name_buffer_->AppendBytes("\" ");
- }
- name_buffer_->AppendBytes("hash ");
- name_buffer_->AppendHex(symbol->Hash());
- name_buffer_->AppendByte(')');
- }
- }
- if (code_event_handler_ != NULL) {
- Script* script =
- shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
- IssueCodeAddedEvent(code,
- script,
- name_buffer_->get(),
- name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ AppendName(name);
+ LogRecordedBuffer(code, shared);
}
- if (!FLAG_log_code) return;
- if (code == Isolate::Current()->builtins()->builtin(
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ if (code == isolate_->builtins()->builtin(
Builtins::kLazyCompile))
return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,", code->ExecutableSize());
+ AppendCodeCreateHeader(&msg, tag, code);
if (name->IsString()) {
SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s\"", *str);
} else {
- Symbol* symbol = Symbol::cast(name);
- msg.Append("symbol(");
- if (!symbol->name()->IsUndefined()) {
- msg.Append("\"");
- msg.AppendDetailed(String::cast(symbol->name()), false);
- msg.Append("\" ");
- }
- msg.Append("hash %x)", symbol->Hash());
+ AppendSymbolName(&msg, Symbol::cast(name));
}
msg.Append(',');
msg.AppendAddress(shared->address());
@@ -1098,9 +1083,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Name* source, int line) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
+ InitNameBuffer(tag);
name_buffer_->AppendBytes(ComputeMarker(code));
name_buffer_->AppendString(shared->DebugName());
name_buffer_->AppendByte(' ');
@@ -1113,45 +1096,21 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
name_buffer_->AppendByte(':');
name_buffer_->AppendInt(line);
+ LogRecordedBuffer(code, shared);
}
- if (code_event_handler_ != NULL) {
- Script* script =
- shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
- IssueCodeAddedEvent(code,
- script,
- name_buffer_->get(),
- name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
LogMessageBuilder msg(this);
+ AppendCodeCreateHeader(&msg, tag, code);
SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s ", code->ExecutableSize(), *name);
+ msg.Append("\"%s ", *name);
if (source->IsString()) {
SmartArrayPointer<char> sourcestr =
String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s", *sourcestr);
} else {
- Symbol* symbol = Symbol::cast(source);
- msg.Append("symbol(");
- if (!symbol->name()->IsUndefined()) {
- msg.Append("\"");
- msg.AppendDetailed(String::cast(symbol->name()), false);
- msg.Append("\" ");
- }
- msg.Append("hash %x)", symbol->Hash());
+ AppendSymbolName(&msg, Symbol::cast(source));
}
msg.Append(":%d\",", line);
msg.AppendAddress(shared->address());
@@ -1164,29 +1123,15 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
+ InitNameBuffer(tag);
name_buffer_->AppendInt(args_count);
+ LogRecordedBuffer(code, NULL);
}
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+ AppendCodeCreateHeader(&msg, tag, code);
+ msg.Append("\"args_count: %d\"", args_count);
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -1202,30 +1147,17 @@ void Logger::CodeMovingGCEvent() {
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
- name_buffer_->AppendByte(':');
+ InitNameBuffer(REG_EXP_TAG);
name_buffer_->AppendString(source);
+ LogRecordedBuffer(code, NULL);
}
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,-2,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[REG_EXP_TAG]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
+ AppendCodeCreateHeader(&msg, REG_EXP_TAG, code);
+ msg.Append('"');
msg.AppendDetailed(source, false);
- msg.Append('\"');
+ msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -1294,12 +1226,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
const char* code_name = address_to_name_map_->Lookup(addr);
if (code_name == NULL) return; // Not a code object.
LogMessageBuilder msg(this);
- msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
- for (const char* p = code_name; *p != '\0'; ++p) {
- if (*p == '"') msg.Append('\\');
- msg.Append(*p);
- }
- msg.Append("\"\n");
+ msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+ msg.AppendDoubleQuotedString(code_name);
+ msg.Append("\n");
msg.WriteToLogFile();
}
if (!FLAG_log_snapshot_positions) return;
@@ -1371,14 +1300,7 @@ void Logger::SuspectReadEvent(Name* name, Object* obj) {
msg.Append(String::cast(name));
msg.Append('"');
} else {
- Symbol* symbol = Symbol::cast(name);
- msg.Append("symbol(");
- if (!symbol->name()->IsUndefined()) {
- msg.Append("\"");
- msg.AppendDetailed(String::cast(symbol->name()), false);
- msg.Append("\" ");
- }
- msg.Append("hash %x)", symbol->Hash());
+ AppendSymbolName(&msg, Symbol::cast(name));
}
msg.Append('\n');
msg.WriteToLogFile();
@@ -1442,8 +1364,6 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
- msg.Append(',');
- msg.AppendAddress(sample->sp);
msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
if (sample->has_external_callback) {
msg.Append(",1,");
@@ -1600,6 +1520,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A stub from the snapshot";
tag = Logger::STUB_TAG;
break;
+ case Code::REGEXP:
+ description = "Regular expression code";
+ tag = Logger::REG_EXP_TAG;
+ break;
case Code::BUILTIN:
description = "A builtin from the snapshot";
tag = Logger::BUILTIN_TAG;
@@ -1628,6 +1552,8 @@ void Logger::LogCodeObject(Object* object) {
description = "A keyed call IC from the snapshot";
tag = Logger::KEYED_CALL_IC_TAG;
break;
+ case Code::NUMBER_OF_KINDS:
+ break;
}
PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
@@ -1781,7 +1707,7 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
+ if (*code_objects[i] == isolate_->builtins()->builtin(
Builtins::kLazyCompile))
continue;
LogExistingFunction(sfis[i], code_objects[i]);
@@ -1861,7 +1787,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
code_event_handler_ = event_handler;
if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
- HandleScope scope(Isolate::Current());
+ HandleScope scope(isolate_);
LogCodeObjects();
LogCompiledFunctions();
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 531f1de0bd..07ecd0efe7 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -293,6 +293,8 @@ class Logger {
// ==== Events logged by --log-timer-events. ====
enum StartEnd { START, END };
+ void CodeDeoptEvent(Code* code);
+
void TimerEvent(StartEnd se, const char* name);
static void EnterExternal(Isolate* isolate);
@@ -413,6 +415,21 @@ class Logger {
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
+  // Helper method. It resets name_buffer_ and adds the tag name to it.
+ void InitNameBuffer(LogEventsAndTags tag);
+
+  // Helper method. It pushes the recorded buffer to the various handlers.
+ void LogRecordedBuffer(Code*, SharedFunctionInfo*);
+
+  // Helper method. It dumps the name into name_buffer_.
+ void AppendName(Name* name);
+
+  // Appends the standard code creation header.
+ void AppendCodeCreateHeader(LogMessageBuilder*, LogEventsAndTags, Code*);
+
+  // Appends the symbol representation of the name.
+ void AppendSymbolName(LogMessageBuilder*, Symbol*);
+
// Emits general information about generated code.
void LogCodeInfo();
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 643d6c7709..e442b4413a 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -117,6 +117,7 @@ macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
+macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
@@ -144,7 +145,6 @@ const kBoundArgumentsStartIndex = 2;
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
-macro TO_POSITIVE_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? (arg > 0 ? arg : 0) : %NumberToPositiveInteger(ToNumber(arg)));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index dc2db4b096..638968506d 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -29,6 +29,7 @@
#include "code-stubs.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
@@ -148,6 +149,7 @@ static void VerifyMarking(Heap* heap) {
VerifyMarking(heap->old_data_space());
VerifyMarking(heap->code_space());
VerifyMarking(heap->cell_space());
+ VerifyMarking(heap->property_cell_space());
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
@@ -229,6 +231,7 @@ static void VerifyEvacuation(Heap* heap) {
VerifyEvacuation(heap->old_data_space());
VerifyEvacuation(heap->code_space());
VerifyEvacuation(heap->cell_space());
+ VerifyEvacuation(heap->property_cell_space());
VerifyEvacuation(heap->map_space());
VerifyEvacuation(heap->new_space());
@@ -283,7 +286,7 @@ class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
array->set_length(length);
}
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case CELL_TYPE:
case JS_PROXY_TYPE:
case JS_VALUE_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
@@ -375,6 +378,7 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->map_space());
TraceFragmentation(heap()->cell_space());
+ TraceFragmentation(heap()->property_cell_space());
}
heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
@@ -468,6 +472,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_data_space());
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->cell_space());
+ VerifyMarkbitsAreClean(heap_->property_cell_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
@@ -529,6 +534,7 @@ void MarkCompactCollector::ClearMarkbits() {
ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
ClearMarkbitsInPagedSpace(heap_->old_data_space());
ClearMarkbitsInPagedSpace(heap_->cell_space());
+ ClearMarkbitsInPagedSpace(heap_->property_cell_space());
ClearMarkbitsInNewSpace(heap_->new_space());
LargeObjectIterator it(heap_->lo_space());
@@ -648,6 +654,8 @@ const char* AllocationSpaceName(AllocationSpace space) {
case CODE_SPACE: return "CODE_SPACE";
case MAP_SPACE: return "MAP_SPACE";
case CELL_SPACE: return "CELL_SPACE";
+    case PROPERTY_CELL_SPACE:
+      return "PROPERTY_CELL_SPACE";
case LO_SPACE: return "LO_SPACE";
default:
UNREACHABLE();
@@ -1003,8 +1011,9 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
if (FLAG_trace_code_flushing && shared->is_compiled()) {
- SmartArrayPointer<char> name = shared->DebugName()->ToCString();
- PrintF("[code-flushing clears: %s]\n", *name);
+ PrintF("[code-flushing clears: ");
+ shared->ShortPrint();
+ PrintF(" - age: %d]\n", code->GetAge());
}
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
@@ -1044,8 +1053,9 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
if (FLAG_trace_code_flushing && candidate->is_compiled()) {
- SmartArrayPointer<char> name = candidate->DebugName()->ToCString();
- PrintF("[code-flushing clears: %s]\n", *name);
+ PrintF("[code-flushing clears: ");
+ candidate->ShortPrint();
+ PrintF(" - age: %d]\n", code->GetAge());
}
candidate->set_code(lazy_compile);
}
@@ -1086,7 +1096,7 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
continue;
}
- // Update and record the context slot in the optimizled code map.
+ // Update and record the context slot in the optimized code map.
Object** context_slot = HeapObject::RawField(code_map,
FixedArray::OffsetOfElementAt(new_length));
code_map->set(new_length++, code_map->get(i + kContextOffset));
@@ -1131,8 +1141,9 @@ void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
if (FLAG_trace_code_flushing) {
- SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
- PrintF("[code-flushing abandons function-info: %s]\n", *name);
+ PrintF("[code-flushing abandons function-info: ");
+ shared_info->ShortPrint();
+ PrintF("]\n");
}
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
@@ -1167,8 +1178,9 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
if (FLAG_trace_code_flushing) {
- SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
- PrintF("[code-flushing abandons closure: %s]\n", *name);
+ PrintF("[code-flushing abandons closure: ");
+ function->shared()->ShortPrint();
+ PrintF("]\n");
}
JSFunction* candidate = jsfunction_candidates_head_;
@@ -1202,8 +1214,9 @@ void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
if (FLAG_trace_code_flushing) {
- SmartArrayPointer<char> name = code_map_holder->DebugName()->ToCString();
- PrintF("[code-flushing abandons code-map: %s]\n", *name);
+ PrintF("[code-flushing abandons code-map: ");
+ code_map_holder->ShortPrint();
+ PrintF("]\n");
}
SharedFunctionInfo* holder = optimized_code_map_holder_head_;
@@ -1307,7 +1320,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
InstanceType type = map->instance_type();
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
- Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
+ Object* second = reinterpret_cast<ConsString*>(object)->second();
Heap* heap = map->GetHeap();
if (second != heap->empty_string()) {
return object;
@@ -1316,7 +1329,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// Since we don't have the object's start, it is impossible to update the
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
- Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
+ Object* first = reinterpret_cast<ConsString*>(object)->first();
if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
*p = first;
@@ -1488,15 +1501,13 @@ class MarkCompactMarkingVisitor
FIXED_ARRAY_TYPE) return;
// Make sure this is a RegExp that actually contains code.
- if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
+ if (re->TypeTag() != JSRegExp::IRREGEXP) return;
- Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
+ Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
if (!code->IsSmi() &&
HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
// Save a copy that can be reinstated if we need the code again.
- re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
- code,
- heap);
+ re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
// Saving a copy might create a pointer into compaction candidate
// that was not observed by marker. This might happen if JSRegExp data
@@ -1508,9 +1519,8 @@ class MarkCompactMarkingVisitor
RecordSlot(slot, slot, code);
// Set a number in the 0-255 range to guarantee no smi overflow.
- re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
- Smi::FromInt(heap->sweep_generation() & 0xff),
- heap);
+ re->SetDataAt(JSRegExp::code_index(is_ascii),
+ Smi::FromInt(heap->sweep_generation() & 0xff));
} else if (code->IsSmi()) {
int value = Smi::cast(code)->value();
// The regexp has not been compiled yet or there was a compilation error.
@@ -1521,12 +1531,10 @@ class MarkCompactMarkingVisitor
// Check if we should flush now.
if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
- re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
- Smi::FromInt(JSRegExp::kUninitializedValue),
- heap);
- re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
- Smi::FromInt(JSRegExp::kUninitializedValue),
- heap);
+ re->SetDataAt(JSRegExp::code_index(is_ascii),
+ Smi::FromInt(JSRegExp::kUninitializedValue));
+ re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
+ Smi::FromInt(JSRegExp::kUninitializedValue));
}
}
}
@@ -1706,23 +1714,6 @@ VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
MarkCompactMarkingVisitor::non_count_table_;
-class MarkingVisitor : public ObjectVisitor {
- public:
- explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
-
- void VisitPointer(Object** p) {
- MarkCompactMarkingVisitor::VisitPointer(heap_, p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
- }
-
- private:
- Heap* heap_;
-};
-
-
class CodeMarkingVisitor : public ThreadVisitor {
public:
explicit CodeMarkingVisitor(MarkCompactCollector* collector)
@@ -2031,14 +2022,13 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
}
-void MarkCompactCollector::MarkStringTable() {
+void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
SetMark(string_table, string_table_mark);
// Explicitly mark the prefix.
- MarkingVisitor marker(heap());
- string_table->IteratePrefix(&marker);
+ string_table->IteratePrefix(visitor);
ProcessMarkingDeque();
}
@@ -2049,7 +2039,7 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the string table specially.
- MarkStringTable();
+ MarkStringTable(visitor);
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
@@ -2148,6 +2138,11 @@ void MarkCompactCollector::RefillMarkingDeque() {
heap()->cell_space());
if (marking_deque_.IsFull()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->property_cell_space());
+ if (marking_deque_.IsFull()) return;
+
LargeObjectIterator lo_it(heap()->lo_space());
DiscoverGreyObjectsWithIterator(heap(),
&marking_deque_,
@@ -2187,6 +2182,24 @@ void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
}
+void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
+ for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
+ !it.done(); it.Advance()) {
+ if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
+ return;
+ }
+ if (it.frame()->type() == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (!code->CanDeoptAt(it.frame()->pc())) {
+ code->CodeIterateBody(visitor);
+ }
+ ProcessMarkingDeque();
+ return;
+ }
+ }
+}
+
+
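
The new ProcessTopOptimizedFrame walks frames from the innermost outward and stops at the first JavaScript frame. A plain-C++ model of that decision, with Frame and FrameType as simplified stand-ins for V8's frame classes:

    enum FrameType { ENTRY_FRAME, EXIT_FRAME, JAVA_SCRIPT_FRAME,
                     OPTIMIZED_FRAME };

    struct Frame {
      FrameType type;
      bool can_deopt_at_pc;  // models Code::CanDeoptAt(frame->pc())
    };

    // frames[0] is the top of stack. Returns the frame whose code body must
    // be scanned strongly, or NULL if no special treatment is needed.
    const Frame* TopFrameNeedingStrongCodeScan(const Frame* frames, int count) {
      for (int i = 0; i < count; i++) {
        if (frames[i].type == JAVA_SCRIPT_FRAME) return 0;  // full-codegen: safe
        if (frames[i].type == OPTIMIZED_FRAME)
          return frames[i].can_deopt_at_pc ? 0 : &frames[i];
      }
      return 0;
    }
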
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
@@ -2241,20 +2254,33 @@ void MarkCompactCollector::MarkLiveObjects() {
HeapObjectIterator cell_iterator(heap()->cell_space());
HeapObject* cell;
while ((cell = cell_iterator.Next()) != NULL) {
- ASSERT(cell->IsJSGlobalPropertyCell());
+ ASSERT(cell->IsCell());
if (IsMarked(cell)) {
- int offset = JSGlobalPropertyCell::kValueOffset;
+ int offset = Cell::kValueOffset;
MarkCompactMarkingVisitor::VisitPointer(
heap(),
reinterpret_cast<Object**>(cell->address() + offset));
}
}
}
+ {
+ HeapObjectIterator js_global_property_cell_iterator(
+ heap()->property_cell_space());
+ HeapObject* cell;
+ while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
+ ASSERT(cell->IsPropertyCell());
+ if (IsMarked(cell)) {
+ MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
+ }
+ }
+ }
}
RootMarkingVisitor root_visitor(heap());
MarkRoots(&root_visitor);
+ ProcessTopOptimizedFrame(&root_visitor);
+
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic or through Harmony weak maps.
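
The "Harmony weak maps" note above refers to ephemeral marking: a weak-map value is live only if its key is live, which forces marking to iterate to a fixpoint. A generic sketch of that fixpoint, not V8's actual ProcessEphemeralMarking (Obj and the containers are stand-ins):

    #include <map>
    #include <set>

    typedef int Obj;  // toy object ids

    // weak_entries maps key -> value for every weak-map entry in the heap.
    void ProcessEphemerons(const std::multimap<Obj, Obj>& weak_entries,
                           std::set<Obj>* marked) {
      bool changed = true;
      while (changed) {  // iterate until no new values become reachable
        changed = false;
        for (std::multimap<Obj, Obj>::const_iterator it = weak_entries.begin();
             it != weak_entries.end(); ++it) {
          if (marked->count(it->first) && !marked->count(it->second)) {
            marked->insert(it->second);  // value is live because its key is
            changed = true;
          }
        }
      }
    }
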
@@ -2386,7 +2412,6 @@ void MarkCompactCollector::ReattachInitialMaps() {
for (HeapObject* obj = map_iterator.Next();
obj != NULL;
obj = map_iterator.Next()) {
- if (obj->IsFreeSpace()) continue;
Map* map = Map::cast(obj);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
@@ -2400,36 +2425,45 @@ void MarkCompactCollector::ReattachInitialMaps() {
void MarkCompactCollector::ClearNonLiveReferences() {
- HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
// is carried out only on maps of JSObjects and related subtypes.
+ HeapObjectIterator map_iterator(heap()->map_space());
for (HeapObject* obj = map_iterator.Next();
- obj != NULL; obj = map_iterator.Next()) {
- Map* map = reinterpret_cast<Map*>(obj);
- MarkBit map_mark = Marking::MarkBitFrom(map);
- if (map->IsFreeSpace()) continue;
+ obj != NULL;
+ obj = map_iterator.Next()) {
+ Map* map = Map::cast(obj);
- ASSERT(map->IsMap());
if (!map->CanTransition()) continue;
- if (map_mark.Get() &&
- map->attached_to_shared_function_info()) {
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ if (map_mark.Get() && map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
// from SharedFunctionInfo during the mark phase.
// Since it survived the GC, reattach it now.
- map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
+ JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
}
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
if (map_mark.Get()) {
- ClearNonLiveDependentCode(map);
+ ClearNonLiveDependentCode(map->dependent_code());
} else {
ClearAndDeoptimizeDependentCode(map);
}
}
+
+ // Iterate over property cell space, removing dependent code that is not
+ // otherwise kept alive by strong references.
+ HeapObjectIterator cell_iterator(heap_->property_cell_space());
+ for (HeapObject* cell = cell_iterator.Next();
+ cell != NULL;
+ cell = cell_iterator.Next()) {
+ if (IsMarked(cell)) {
+ ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
+ }
+ }
}
@@ -2449,13 +2483,11 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
int proto_index = proto_offset + new_number_of_transitions * step;
int map_index = map_offset + new_number_of_transitions * step;
if (new_number_of_transitions != i) {
- prototype_transitions->set_unchecked(
- heap_,
+ prototype_transitions->set(
proto_index,
prototype,
UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(
- heap_,
+ prototype_transitions->set(
map_index,
cached_map,
SKIP_WRITE_BARRIER);
@@ -2504,19 +2536,21 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
for (int i = 0; i < number_of_entries; i++) {
+ // If the entry is compilation info, the map must be alive, and
+ // ClearAndDeoptimizeDependentCode should not have been called.
+ ASSERT(entries->is_code_at(i));
Code* code = entries->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
}
- entries->clear_code_at(i);
+ entries->clear_at(i);
}
map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
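
The effect of ClearAndDeoptimizeDependentCode, restated as plain C++ (Code and the vector are simplified stand-ins for V8's DependentCode array): when the owning map dies, every live dependent code object is flagged for lazy deoptimization and the list is dropped.

    #include <vector>

    struct Code {
      bool marked;
      bool marked_for_deoptimization;
    };

    void ClearAndDeoptimize(std::vector<Code*>* dependent) {
      for (size_t i = 0; i < dependent->size(); i++) {
        Code* code = (*dependent)[i];
        if (code->marked && !code->marked_for_deoptimization)
          code->marked_for_deoptimization = true;  // deopts on next activation
      }
      dependent->clear();  // models resetting to the empty fixed array
    }
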
-void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
DisallowHeapAllocation no_allocation;
- DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
@@ -2525,15 +2559,17 @@ void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
for (int g = 0; g < DependentCode::kGroupCount; g++) {
int group_number_of_entries = 0;
for (int i = starts.at(g); i < starts.at(g + 1); i++) {
- Code* code = entries->code_at(i);
- if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ Object* obj = entries->object_at(i);
+ ASSERT(obj->IsCode() || IsMarked(obj));
+ if (IsMarked(obj) &&
+ (!obj->IsCode() || !Code::cast(obj)->marked_for_deoptimization())) {
if (new_number_of_entries + group_number_of_entries != i) {
- entries->set_code_at(new_number_of_entries +
- group_number_of_entries, code);
+ entries->set_object_at(
+ new_number_of_entries + group_number_of_entries, obj);
}
- Object** slot = entries->code_slot_at(new_number_of_entries +
- group_number_of_entries);
- RecordSlot(slot, slot, code);
+ Object** slot = entries->slot_at(new_number_of_entries +
+ group_number_of_entries);
+ RecordSlot(slot, slot, obj);
group_number_of_entries++;
}
}
@@ -2543,7 +2579,7 @@ void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
new_number_of_entries += group_number_of_entries;
}
for (int i = new_number_of_entries; i < number_of_entries; i++) {
- entries->clear_code_at(i);
+ entries->clear_at(i);
}
}
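
The loop above compacts the grouped DependentCode array in place. The same sliding-compaction idea in isolation, with ints standing in for heap objects; note the real code also rewrites the per-group start indexes, which this sketch omits:

    #include <vector>

    // entries is a flat array partitioned into groups; starts[g]..starts[g+1]
    // delimit group g. Live entries slide left so each group stays
    // contiguous; the freed tail is cleared, matching clear_at() above.
    int CompactGroups(std::vector<int>* entries,
                      const std::vector<int>& starts,
                      bool (*is_live)(int)) {
      int new_count = 0;
      for (size_t g = 0; g + 1 < starts.size(); g++) {
        int group_live = 0;
        for (int i = starts[g]; i < starts[g + 1]; i++) {
          if (is_live((*entries)[i])) {
            (*entries)[new_count + group_live] = (*entries)[i];
            group_live++;
          }
        }
        new_count += group_live;
      }
      for (size_t i = new_count; i < entries->size(); i++) (*entries)[i] = 0;
      return new_count;
    }
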
@@ -3102,6 +3138,11 @@ static void SweepPrecisely(PagedSpace* space,
Address free_end = object_address + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
space->Free(free_start, static_cast<int>(free_end - free_start));
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+ GDBJITInterface::RemoveCodeRange(free_start, free_end);
+ }
+#endif
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -3128,6 +3169,11 @@ static void SweepPrecisely(PagedSpace* space,
}
if (free_start != p->area_end()) {
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+ GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
+ }
+#endif
}
p->ResetLiveBytes();
if (FLAG_print_cumulative_gc_stat) {
@@ -3258,11 +3304,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
-
EvacuateNewSpace();
}
-
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
}
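
The SweepPrecisely changes above hook the GDB JIT interface into the existing free-range walk: each contiguous dead region between live objects is freed, and for code space the same range is retired from the JIT debug tables. Schematically (LiveObject and on_free are invented stand-ins; on_free models space->Free() plus GDBJITInterface::RemoveCodeRange()):

    #include <vector>

    struct LiveObject { unsigned long address; unsigned long size; };

    // Walk a page once, handing every contiguous dead range to on_free().
    void SweepPreciselySketch(unsigned long area_start, unsigned long area_end,
                              const std::vector<LiveObject>& live,  // sorted
                              void (*on_free)(unsigned long, unsigned long)) {
      unsigned long free_start = area_start;
      for (size_t i = 0; i < live.size(); i++) {
        if (live[i].address != free_start) on_free(free_start, live[i].address);
        free_start = live[i].address + live[i].size;
      }
      if (free_start != area_end) on_free(free_start, area_end);
    }
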
@@ -3384,11 +3428,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
for (HeapObject* cell = cell_iterator.Next();
cell != NULL;
cell = cell_iterator.Next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ if (cell->IsCell()) {
+ Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
+ }
+ }
+
+ HeapObjectIterator js_global_property_cell_iterator(
+ heap_->property_cell_space());
+ for (HeapObject* cell = js_global_property_cell_iterator.Next();
+ cell != NULL;
+ cell = js_global_property_cell_iterator.Next()) {
+ if (cell->IsPropertyCell()) {
+ PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
}
}
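
Cell and PropertyCell bodies are now walked through body descriptors rather than a hand-computed value offset. A toy illustration of the body-descriptor idea (UpdatingVisitor, IterateBody, and the offset constants are assumptions for illustration, not V8's actual descriptor machinery):

    struct UpdatingVisitor {
      void VisitPointer(void** p) { (void)p; /* rewrite *p if target moved */ }
    };

    // Every pointer-sized slot between kStartOffset and kEndOffset is
    // visited, so a body with several pointer fields (PropertyCell) needs no
    // special casing compared to one with a single value slot (Cell).
    template <typename BodyDescriptor, typename ObjectT>
    void IterateBody(ObjectT* obj, UpdatingVisitor* v) {
      char* base = reinterpret_cast<char*>(obj);
      for (int offset = BodyDescriptor::kStartOffset;
           offset < BodyDescriptor::kEndOffset;
           offset += static_cast<int>(sizeof(void*))) {
        v->VisitPointer(reinterpret_cast<void**>(base + offset));
      }
    }
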
@@ -4050,6 +4101,7 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(heap()->code_space(), PRECISE);
SweepSpace(heap()->cell_space(), PRECISE);
+ SweepSpace(heap()->property_cell_space(), PRECISE);
EvacuateNewSpaceAndCandidates();
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 873534c2fa..ab3711a386 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -835,7 +835,7 @@ class MarkCompactCollector {
// Mark the string table specially. References to internalized strings from
// the string table are weak.
- void MarkStringTable();
+ void MarkStringTable(RootMarkingVisitor* visitor);
// Mark objects in implicit references groups if their parent object
// is marked.
@@ -853,6 +853,11 @@ class MarkCompactCollector {
// or implicit references' groups.
void ProcessEphemeralMarking(ObjectVisitor* visitor);
+ // If the call site of the topmost optimized frame was not prepared for
+ // deoptimization, treat the maps in its code as strong pointers;
+ // otherwise a map could die and deoptimize the code.
+ void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
@@ -880,7 +885,7 @@ class MarkCompactCollector {
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
void ClearAndDeoptimizeDependentCode(Map* map);
- void ClearNonLiveDependentCode(Map* map);
+ void ClearNonLiveDependentCode(DependentCode* dependent_code);
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
diff --git a/deps/v8/src/marking-thread.cc b/deps/v8/src/marking-thread.cc
index 574485abc7..ac9f944fe7 100644
--- a/deps/v8/src/marking-thread.cc
+++ b/deps/v8/src/marking-thread.cc
@@ -73,6 +73,7 @@ void MarkingThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
start_marking_semaphore_->Signal();
stop_semaphore_->Wait();
+ Join();
}
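
Without the added Join(), Stop() returns as soon as the worker acknowledges the stop semaphore, leaking the OS thread at shutdown. The signal-then-join shape, sketched with pthreads (Worker is an invented stand-in for MarkingThread):

    #include <pthread.h>

    struct Worker {
      pthread_t handle;
      volatile bool stop_requested;  // polled by the worker loop

      void Stop() {
        stop_requested = true;
        // ...signal the worker's start semaphore, wait on the stop
        // semaphore, as in MarkingThread::Stop() above...
        pthread_join(handle, 0);  // reclaim the thread before teardown
      }
    };
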
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index ce075ce5e5..137d98fe7b 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -108,14 +108,23 @@ var kMessages = {
["%0"," constructor should have at least one argument."],
not_typed_array: ["this is not a typed array."],
invalid_argument: ["invalid_argument"],
+ data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
+ constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
- invalid_typed_array_offset: ["Start offset is too large"],
- invalid_typed_array_length: ["Length is too large"],
+ invalid_typed_array_offset: ["Start offset is too large:"],
+ invalid_typed_array_length: ["Invalid typed array length"],
invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
typed_array_set_source_too_large:
["Source is too large"],
+ typed_array_set_negative_offset:
+ ["Start offset is negative"],
+ invalid_data_view_offset: ["Start offset is outside the bounds of the buffer"],
+ invalid_data_view_length: ["Invalid data view length"],
+ invalid_data_view_accessor_offset:
+ ["Offset is outside the bounds of the DataView"],
+
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
// SyntaxError
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 3cb222c46f..38473b56d1 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1 +1,2 @@
plind44@gmail.com
+gergely@homejinni.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index d922bfac66..2ca00831cf 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -202,6 +202,7 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -232,24 +233,22 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -345,8 +344,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
@@ -373,8 +372,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index eee79a2156..c4fefcc512 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -35,7 +35,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
@@ -1475,7 +1475,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
void Assembler::stop(const char* msg, uint32_t code) {
ASSERT(code > kMaxWatchpointCode);
ASSERT(code <= kMaxStopCode);
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
break_(0x54321);
#else // V8_HOST_ARCH_MIPS
BlockTrampolinePoolFor(2);
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 06273caf78..35d21f05e6 100644..100755
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "debug.h"
@@ -108,372 +108,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, zero_reg);
- __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ Addu(scratch1, result, Operand(JSArray::kSize));
- __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
- __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
- }
- } else {
- Label loop, entry;
- __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ Branch(&entry);
- __ bind(&loop);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, lt, scratch1, Operand(scratch2));
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ Assert(
- ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ li(elements_array_end,
- (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
- __ sra(scratch1, array_size, kSmiTagSize);
- __ Addu(elements_array_end, elements_array_end, scratch1);
- __ Allocate(elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ sw(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
- __ sw(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ And(elements_array_storage,
- elements_array_storage,
- Operand(~kHeapObjectTagMask));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ sw(scratch1, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- // Length of the FixedArray is the number of pre-allocated elements if
- // the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is stored as a smi.
- STATIC_ASSERT(kSmiTag == 0);
-
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ sw(array_size, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements_array_end, elements_array_storage, elements_array_end);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ Branch(&entry);
- __ bind(&loop);
- __ sw(scratch1, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// a0: argc
-// a1: constructor (built-in Array function)
-// ra: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in a1 needs to be preserved for
-// entering the generic code. In both cases argc in a0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
- // Set up return value, remove receiver from stack and return.
- __ Addu(sp, sp, Operand(kPointerSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a2);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ Branch(&argc_two_or_more, ne, a0, Operand(1));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ lw(a2, MemOperand(sp)); // Get the argument from the stack.
- __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
- __ Drop(1); // Adjust stack.
- __ mov(a0, zero_reg); // Treat this as a call with argc of zero.
- __ Branch(&empty_array);
-
- __ bind(&not_empty_array);
- __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
- __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(call_generic_code, Ugreater_equal, a2,
- Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
-
- // a0: argc
- // a1: constructor
- // a2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
-
- // Set up return value, remove receiver and argument from stack and return.
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a3);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
-
- // a0: argc
- // a1: constructor
- // a2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store.
- // a0: argc
- // a3: JSArray
- // t0: elements_array storage start (untagged)
- // t1: elements_array_end (untagged)
- // sp[0]: last argument
-
- Label loop, entry;
- __ Branch(USE_DELAY_SLOT, &entry);
- __ mov(t3, sp);
- __ bind(&loop);
- __ lw(a2, MemOperand(t3));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(a2, &has_non_smi_element);
- }
- __ Addu(t3, t3, kPointerSize);
- __ Addu(t1, t1, -kPointerSize);
- __ sw(a2, MemOperand(t1));
- __ bind(&entry);
- __ Branch(&loop, lt, t0, Operand(t1));
-
- __ bind(&finish);
- __ mov(sp, t3);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // a0: argc
- // a3: JSArray
- // sp[0]: receiver
- __ Addu(sp, sp, Operand(kPointerSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a3);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(a3, t0);
- __ Branch(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // a3: JSArray
- __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a2,
- t5,
- &cant_transition_map);
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ RecordWriteField(a3,
- HeapObject::kMapOffset,
- a2,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ bind(&loop2);
- __ lw(a2, MemOperand(t3));
- __ Addu(t3, t3, kPointerSize);
- __ Subu(t1, t1, kPointerSize);
- __ sw(a2, MemOperand(t1));
- __ Branch(&loop2, lt, t0, Operand(t1));
- __ Branch(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -498,20 +132,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- if (FLAG_optimize_constructed_arrays) {
- // Tail call a stub.
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
- }
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -538,58 +161,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- if (FLAG_optimize_constructed_arrays) {
- // Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a2 : type info cell
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function (3)",
- t0, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function (4)",
- t0, Operand(MAP_TYPE));
- }
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ li(a2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -695,7 +273,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0);
+ __ push(a0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
__ pop(function);
@@ -1162,6 +740,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// -- a3: argc
// -- s0: argv
// -----------------------------------
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the JS frame.
__ mov(cp, zero_reg);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 3d0577eb1e..69b957afa8 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "bootstrapper.h"
#include "code-stubs.h"
@@ -1181,12 +1181,17 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Now that we have the types we might as well check for
// internalized-internalized.
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+ Label not_internalized;
STATIC_ASSERT(kInternalizedTag != 0);
- __ And(t2, a2, Operand(a3));
- __ And(t0, t2, Operand(kIsInternalizedMask));
- __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+ __ And(t2, a2, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ Branch(&not_internalized, ne, t2,
+ Operand(kInternalizedTag | kStringTag));
+
+ __ And(a3, a3, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ Branch(&return_not_equal, eq, a3,
+ Operand(kInternalizedTag | kStringTag));
+
+ __ bind(&not_internalized);
}
@@ -1220,8 +1225,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
ASSERT((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
- // a2 is object type of lhs.
- // Ensure that no non-strings have the internalized bit set.
+ // a2 is object type of rhs.
Label object_test;
STATIC_ASSERT(kInternalizedTag != 0);
__ And(at, a2, Operand(kIsNotStringMask));
@@ -2326,7 +2330,7 @@ void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label right_arg_changed, call_runtime;
- if (op_ == Token::MOD && has_fixed_right_arg_) {
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
// It is guaranteed that the value will fit into a Smi, because if it
// didn't, we wouldn't be here, see BinaryOp_Patch.
__ Branch(&right_arg_changed,
@@ -2488,47 +2492,36 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNREACHABLE();
}
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
+ if (result_type_ <= BinaryOpIC::INT32) {
Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
+ const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
+ kRoundToMinusInf : kRoundToZero;
+ const CheckForInexactConversion kConversion = op_ == Token::DIV ?
+ kCheckForInexactConversion : kDontCheckForInexactConversion;
+ __ EmitFPUTruncate(kRoundingMode,
scratch1,
f10,
at,
f16,
- except_flag);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- // If not try to return a heap number.
+ except_flag,
+ kConversion);
+ // If except_flag != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, except_flag, Operand(zero_reg));
+ // Try to tag the result as a Smi, return heap number on overflow.
+ __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
__ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero. Return heap number for minus zero if
- // double results are allowed; otherwise transition.
+ // Check for minus zero, transition in that case (because we need
+ // to return a heap number).
Label not_zero;
+ ASSERT(kSmiTag == 0);
__ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
__ mfc1(scratch2, f11);
__ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(result_type_ <= BinaryOpIC::INT32 ? &transition
- : &return_heap_number,
- ne,
- scratch2,
- Operand(zero_reg));
+ __ Branch(&transition, ne, scratch2, Operand(zero_reg));
__ bind(&not_zero);
- // Tag the result and return.
__ Ret(USE_DELAY_SLOT);
- __ SmiTag(v0, scratch1); // SmiTag emits one instruction.
- } else {
- // DIV just falls through to allocating a heap number.
+ __ mov(v0, scratch1);
}
__ bind(&return_heap_number);
@@ -2552,7 +2545,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// to type transition.
} else {
- if (has_fixed_right_arg_) {
+ if (encoded_right_arg_.has_value) {
__ Move(f16, fixed_right_arg_value());
__ BranchF(&transition, NULL, ne, f14, f16);
}
@@ -3350,9 +3343,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
- if (FLAG_optimize_constructed_arrays) {
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
- }
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -3539,6 +3530,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
@@ -3632,6 +3625,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// 4 args slots
// args
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved | ra.bit());
@@ -3860,7 +3855,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Subu(inline_site, ra, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
- __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
}
// Register mapping: a3 is object map and t0 is function prototype.
@@ -5029,55 +5024,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // a1 : the function to call
- // a2 : cache cell for call target
- Label done;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, eq, a3, Operand(at));
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
- // An uninitialized cache is patched with the function.
- // Store a1 in the delay slot. This may or may not get overwritten depending
- // on the result of the comparison.
- __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
-
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
- __ bind(&done);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a1 : the function to call
// a2 : cache cell for call target
- ASSERT(FLAG_optimize_constructed_arrays);
Label initialize, done, miss, megamorphic, not_array_function;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5086,7 +5038,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
masm->isolate()->heap()->the_hole_value());
// Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -5097,11 +5049,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Special handling of the Array() function, which caches not only the
// monomorphic Array function but also the initial ElementsKind, using
// special sentinels.
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(a3, &miss);
- __ Branch(&miss, gt, a3, Operand(terminal_kind_sentinel));
+ if (FLAG_debug_code) {
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ Assert(le, "Array function sentinel is not an ElementsKind",
+ a3, Operand(terminal_kind_sentinel));
+ }
+
// Make sure the function is the Array() function
__ LoadArrayFunction(a3);
__ Branch(&megamorphic, ne, a1, Operand(a3));
@@ -5117,7 +5073,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// write-barrier is needed.
__ bind(&megamorphic);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
@@ -5134,11 +5090,11 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
GetInitialFastElementsKind());
__ li(a3, Operand(initial_kind_sentinel));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
__ Branch(&done);
__ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
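
The generated code above implements a one-way cache state machine. The same logic in plain C++ rather than emitted MIPS, leaving out the Array() sentinel special case (CacheCell and the two sentinel constants are stand-ins for the type cell and V8's the-hole/undefined sentinels):

    struct CacheCell { void* value; };

    void* const kUninitialized = 0;                          // the-hole
    void* const kMegamorphic = reinterpret_cast<void*>(1);   // undefined

    void RecordCallTarget(CacheCell* cell, void* function) {
      void* state = cell->value;
      if (state == function || state == kMegamorphic) return;  // no change
      if (state == kUninitialized) {
        cell->value = function;  // first call site seen: go monomorphic
        return;
      }
      cell->value = kMegamorphic;  // a different function: give up caching
    }

States only move forward (uninitialized, monomorphic, megamorphic), so the cell never needs to be re-examined once megamorphic.
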
@@ -5177,11 +5133,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Fast-case: Invoke the function now.
@@ -5214,7 +5166,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
}
// Check for function proxy.
__ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
@@ -5255,15 +5207,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2;
+ Register jmp_reg = a3;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -6676,9 +6624,13 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
- __ And(tmp1, tmp1, Operand(tmp2));
- __ And(tmp1, tmp1, kIsInternalizedMask);
- __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+
+ __ And(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ Branch(&miss, ne, tmp1, Operand(kInternalizedTag | kStringTag));
+
+ __ And(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
+ __ Branch(&miss, ne, tmp2, Operand(kInternalizedTag | kStringTag));
+
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
@@ -6718,17 +6670,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- Label succeed1;
- __ And(at, tmp1, Operand(kIsInternalizedMask));
- __ Branch(&succeed1, ne, at, Operand(zero_reg));
- __ Branch(&miss, ne, tmp1, Operand(SYMBOL_TYPE));
- __ bind(&succeed1);
-
- Label succeed2;
- __ And(at, tmp2, Operand(kIsInternalizedMask));
- __ Branch(&succeed2, ne, at, Operand(zero_reg));
- __ Branch(&miss, ne, tmp2, Operand(SYMBOL_TYPE));
- __ bind(&succeed2);
+ __ JumpIfNotUniqueName(tmp1, &miss);
+ __ JumpIfNotUniqueName(tmp2, &miss);
// Use a0 as result
__ mov(v0, a0);
@@ -6791,7 +6734,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
// Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
+ // because we already know they are not identical. We know they are both
+ // strings.
if (equality) {
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag != 0);
@@ -6912,13 +6856,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ li(t9, Operand(function));
- this->GenerateCall(masm, t9);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ Move(t9, target);
__ AssertStackIsAligned();
@@ -7003,10 +6940,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
- __ Branch(&good, ne, scratch0, Operand(zero_reg));
- __ Branch(miss, ne, entity_name, Operand(SYMBOL_TYPE));
-
+ __ JumpIfNotUniqueName(entity_name, miss);
__ bind(&good);
// Restore the properties.
@@ -7180,14 +7114,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
- Label cont;
__ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ And(result, entry_key, Operand(kIsInternalizedMask));
- __ Branch(&cont, ne, result, Operand(zero_reg));
- __ Branch(&maybe_in_dictionary, ne, entry_key, Operand(SYMBOL_TYPE));
- __ bind(&cont);
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
}
}
@@ -7500,10 +7430,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : element value to store
- // -- a1 : array literal
- // -- a2 : map of array literal
// -- a3 : element index as smi
- // -- t0 : array literal index in function as smi
+ // -- sp[0] : array literal index in function as smi
+ // -- sp[4] : array literal
+ // clobbers a1, a2, t0
// -----------------------------------
Label element_done;
@@ -7512,6 +7442,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ lw(t0, MemOperand(sp, 0 * kPointerSize));
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+
__ CheckFastElements(a2, t1, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
__ JumpIfSmi(a0, &smi_element);
@@ -7579,7 +7514,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(ra);
__ CallStub(&stub);
@@ -7594,9 +7530,16 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
- // Save live volatile registers.
- __ Push(ra, t1, a1);
- const int32_t kNumSavedRegs = 3;
+ // This should contain all kJSCallerSaved registers.
+ const RegList kSavedRegs =
+ kJSCallerSaved | // Caller saved registers.
+ s5.bit(); // Saved stack pointer.
+
+ // We also save ra, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ MultiPush(kSavedRegs | ra.bit());
// Compute the function's address for the first argument.
__ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
@@ -7608,20 +7551,19 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Align the stack if necessary.
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
- __ mov(t1, sp);
+ __ mov(s5, sp);
ASSERT(IsPowerOf2(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
#if defined(V8_HOST_ARCH_MIPS)
- __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ lw(at, MemOperand(at));
+ int32_t entry_hook =
+ reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
+ __ li(at, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ li(at, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
masm->isolate())));
@@ -7630,10 +7572,11 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
- __ mov(sp, t1);
+ __ mov(sp, s5);
}
- __ Pop(ra, t1, a1);
+ // Also pop ra to get Ret(0).
+ __ MultiPop(kSavedRegs | ra.bit());
__ Ret();
}
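
What the stub ultimately dispatches to is an embedder-supplied entry hook. A usage sketch matching the FunctionEntryHook typedef in v8.h of this era (the body and hook name are illustrative only):

    #include <cstdio>
    #include <stdint.h>

    // void hook(uintptr_t function, uintptr_t return_addr_location);
    void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
      // Runs on entry to generated code, potentially at any point in the JS
      // stack, which is why the stub now saves every caller-saved register.
      fprintf(stderr, "enter code object at %lx\n",
              static_cast<unsigned long>(function));
      (void)return_addr_location;
    }
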
@@ -7687,6 +7630,10 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ Addu(a3, a3, Operand(1));
__ Branch(&normal_sequence, eq, a2, Operand(undefined_sentinel));
+ // The type cell may have gone megamorphic; don't overwrite it if so.
+ __ lw(t1, FieldMemOperand(a2, kPointerSize));
+ __ JumpIfNotSmi(t1, &normal_sequence);
+
// Save the resulting elements kind in type info
__ SmiTag(a3);
__ sw(a3, FieldMemOperand(a2, kPointerSize));
@@ -7718,7 +7665,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, true);
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7776,61 +7723,47 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Assert(eq, "Unexpected initial map for Array function",
t0, Operand(MAP_TYPE));
- // We should either have undefined in ebx or a valid jsglobalpropertycell
+ // We should either have undefined in a2 or a valid cell
Label okay_here;
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ Branch(&okay_here, eq, a2, Operand(undefined_sentinel));
__ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, "Expected property cell in register ebx",
- a3, Operand(global_property_cell_map));
+ __ Assert(eq, "Expected property cell in register a2",
+ a3, Operand(cell_map));
__ bind(&okay_here);
}
- if (FLAG_optimize_constructed_arrays) {
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
- __ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- __ JumpIfNotSmi(a3, &no_info);
- __ SmiUntag(a3);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ li(a3, Operand(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ And(at, a0, a0);
- __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ Label no_info, switch_ready;
+ // Get the elements kind and case on that.
+ __ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ __ JumpIfNotSmi(a3, &no_info);
+ __ SmiUntag(a3);
+ __ jmp(&switch_ready);
+ __ bind(&no_info);
+ __ li(a3, Operand(GetInitialFastElementsKind()));
+ __ bind(&switch_ready);
+
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ UNREACHABLE();
}
}
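
For orientation: the surviving dispatch above selects one of three specialized stubs from the static argument-count hint, falling back to a run-time check on a0 for ANY. A minimal C++ sketch of that selection logic; the helper names are illustrative stand-ins, not V8 API:

    // Illustrative stand-ins for the three specialized stubs.
    void ConstructNoArgument() {}
    void ConstructOneArgument() {}
    void ConstructNArguments() {}

    enum ArgumentCountKey { NONE_ARGS, ONE_ARG, MORE_THAN_ONE_ARG, ANY_ARGS };

    void DispatchArrayConstruct(int argc, ArgumentCountKey key) {
      switch (key) {
        case NONE_ARGS:         ConstructNoArgument();  break;
        case ONE_ARG:           ConstructOneArgument(); break;
        case MORE_THAN_ONE_ARG: ConstructNArguments();  break;
        case ANY_ARGS:
          // Decided at run time, mirroring the branches on a0 above.
          if (argc == 0)      ConstructNoArgument();
          else if (argc == 1) ConstructOneArgument();
          else                ConstructNArguments();
          break;
      }
    }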
@@ -7891,43 +7824,30 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
t0, Operand(MAP_TYPE));
}
- if (FLAG_optimize_constructed_arrays) {
- // Figure out the right elements kind.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into a3. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ // Figure out the right elements kind.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
- Label done;
- __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
- __ Assert(
- eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
- a3, Operand(FAST_HOLEY_ELEMENTS));
- __ bind(&done);
- }
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
- Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ if (FLAG_debug_code) {
+ Label done;
+ __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
+ __ Assert(
+ eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
+ a3, Operand(FAST_HOLEY_ELEMENTS));
+ __ bind(&done);
+ }
- __ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ Label fast_elements_case;
+ __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
}
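
The elements-kind check above pulls a bit field out of the map with a single Ext instruction. The portable equivalent, with illustrative shift and width constants (the real values live on Map):

    #include <cstdint>

    // Assumed values for illustration; see Map::kElementsKindShift and
    // Map::kElementsKindBitCount for the real constants.
    static const int kElementsKindShift = 3;
    static const int kElementsKindBitCount = 5;

    int ElementsKindFromBitField2(uint8_t bit_field2) {
      // The same extraction the Ext instruction performs in one step.
      return (bit_field2 >> kElementsKindShift) &
             ((1 << kElementsKindBitCount) - 1);
    }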
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index ec7d147988..bf5db10f63 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -599,8 +599,6 @@ class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm,
- ExternalReference function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 72eb00bca4..7a95bc426b 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index ddfa891326..a20ec5479a 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "constants-mips.h"
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 93ebeda800..d13b23330f 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -36,7 +36,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "cpu.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 0ae01875e5..30cc4db634 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "debug.h"
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index b787f13fef..708df39d24 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -56,7 +56,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#include "disasm.h"
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index 540caa9d0d..1bd511654a 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -28,25 +28,17 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "assembler.h"
#include "assembler-mips.h"
#include "assembler-mips-inl.h"
-#include "frames-inl.h"
-#include "mips/assembler-mips-inl.h"
-#include "macro-assembler.h"
-#include "macro-assembler-mips.h"
+#include "frames.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 7368eada62..032c1f5e5b 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
// Note on Mips implementation:
//
@@ -138,7 +138,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -326,9 +326,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ li(a2, Operand(profiling_counter_));
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
__ Subu(a3, a3, Operand(Smi::FromInt(delta)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
}
@@ -344,7 +344,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
__ li(a2, Operand(profiling_counter_));
__ li(a3, Operand(Smi::FromInt(reset_value)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
}
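
Both hunks above reduce to simple load/modify/store traffic on the Smi payload of a Cell. As plain C++, with an int32_t standing in for the Smi:

    #include <cstdint>

    struct Cell { int32_t value; };  // stands in for a Smi payload in a Cell

    void ProfilingCounterDecrement(Cell* counter, int32_t delta) {
      counter->value -= delta;       // lw / Subu / sw on Cell::kValueOffset
    }

    void ProfilingCounterReset(Cell* counter, int32_t reset_value) {
      counter->value = reset_value;  // li / sw
    }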
@@ -363,7 +363,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
@@ -406,7 +406,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -1164,15 +1164,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1696,11 +1694,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(a0, result_register());
- __ li(a2, Operand(key->handle()));
+ __ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1836,13 +1834,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(v0);
+ __ push(v0); // array literal
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1850,7 +1846,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ lw(t2, MemOperand(sp)); // Copy of array literal.
+ __ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
__ sw(result_register(), FieldMemOperand(a1, offset));
// Update the write barrier for the array store.
@@ -1858,10 +1854,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kRAHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ lw(a1, MemOperand(sp)); // Copy of array literal.
- __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
__ li(a3, Operand(Smi::FromInt(i)));
- __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
__ mov(a0, result_register());
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
@@ -1870,6 +1863,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
if (result_saved) {
+ __ Pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(v0);
@@ -1998,22 +1992,36 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::INITIAL:
- case Yield::SUSPEND: {
- VisitForStackValue(expr->generator_object());
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ lw(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
- Label resume;
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&resume, ne, result_register(), Operand(at));
- if (expr->yield_kind() == Yield::SUSPEND) {
- EmitReturnIteratorResult(false);
- } else {
- __ pop(result_register());
- EmitReturnSequence();
- }
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+ __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+ __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ Branch(&post_runtime, eq, sp, Operand(a1));
+ __ push(v0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -2025,7 +2033,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ sw(a1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
- EmitReturnIteratorResult(true);
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
break;
}
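
The new suspend path writes the resume offset and the current context into the generator object before deciding whether a runtime call is needed at all. A sketch of that protocol; the struct layout and helper are illustrative, not the JSGeneratorObject layout:

    struct Generator {
      int continuation;   // offset to resume at (kContinuationOffset)
      void* context;      // saved cp (kContextOffset)
    };

    void RuntimeSuspend(Generator* g) { (void)g; }  // hypothetical runtime stub

    void Suspend(Generator* g, int resume_offset, void* current_context,
                 bool operands_on_stack) {
      g->continuation = resume_offset;  // sw a1, ...kContinuationOffset
      g->context = current_context;     // sw cp, ...kContextOffset (+ barrier)
      // The runtime call is skipped when sp already equals the frame's
      // expression base, mirroring the Branch(&post_runtime, eq, sp, a1).
      if (operands_on_stack) RuntimeSuspend(g);
    }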
@@ -2036,83 +2047,70 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Branch(&l_next);
- // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
__ mov(a0, v0);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
__ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(a3); // iter
__ push(a0); // exception
- __ mov(a0, a3); // iter
- __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
- Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(throw_ic); // iter.throw in a0
- __ mov(a0, v0);
__ jmp(&l_call);
- // try { received = yield result.value }
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
__ bind(&l_try);
- __ pop(a0); // result.value
+ __ pop(a0); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(a0); // result.value
- __ lw(a3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size)); // g
- __ push(a3); // g
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ push(a0); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
__ mov(a0, v0);
- __ lw(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&l_resume, ne, a0, Operand(at));
- EmitReturnIteratorResult(false);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ lw(a0, MemOperand(sp, generator_object_depth));
+ __ push(a0); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
+ __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(v0); // result
+ EmitReturnSequence();
__ mov(a0, v0);
__ bind(&l_resume); // received in a0
__ PopTryHandler();
- // receiver = iter; f = iter.next; arg = received;
+ // receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
+ __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
__ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(a3); // iter
__ push(a0); // received
- __ mov(a0, a3); // iter
- __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
- Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(next_ic); // iter.next in a0
- __ mov(a0, v0);
- // result = f.call(receiver, arg);
+ // result = receiver[f](arg);
__ bind(&l_call);
- Label l_call_runtime;
- __ JumpIfSmi(a0, &l_call_runtime);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&l_call_runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
- __ mov(a1, a0);
- ParameterCount count(1);
- __ InvokeFunction(a1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&l_loop);
- __ bind(&l_call_runtime);
- __ push(a0);
- __ CallRuntime(Runtime::kCall, 3);
- // val = result.value; if (!result.done) goto l_try;
+ // if (!result.done) goto l_try;
__ bind(&l_loop);
__ mov(a0, v0);
- // result.value
__ push(a0); // save result
- __ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in a0
- __ mov(a0, v0);
- __ pop(a1); // result
- __ push(a0); // result.value
- __ mov(a0, a1); // result
__ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in v0
@@ -2122,7 +2120,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Branch(&l_try, eq, v0, Operand(zero_reg));
// result.value
- __ pop(v0); // result.value
+ __ pop(a0); // result
+ __ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in v0
context()->DropAndPlug(2, v0); // drop iter and g
break;
}
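
At the source level the rewritten block implements the delegation loop for yield*: call receiver[f](arg) through a keyed call IC, suspend while result.done is false, and unbox result.value only once iteration finishes. A toy C++ rendering of that control flow, under a hypothetical iterator interface:

    struct Result { bool done; void* value; };
    struct Iter { Result (*next)(void* arg); };

    void* Delegate(Iter iter) {
      void* received = nullptr;               // initial send value: undefined
      for (;;) {
        Result result = iter.next(received);  // l_call: receiver["next"](arg)
        if (result.done) return result.value; // l_loop: unbox value at the end
        // l_try: suspend with the boxed result; on resume the sent value
        // arrives here (the catch path swaps "next" for "throw").
        received = nullptr;
      }
    }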
@@ -2162,7 +2163,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ Subu(a3, a3, Operand(1));
+ __ Subu(a3, a3, Operand(Smi::FromInt(1)));
__ Branch(&push_frame, lt, a3, Operand(zero_reg));
__ push(a2);
__ jmp(&push_argument_holes);
@@ -2226,13 +2227,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
-void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), a0, a2, a3, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ lw(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ li(a1, Operand(map));
@@ -2252,27 +2260,7 @@ void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
// root set.
__ RecordWriteField(a0, JSGeneratorObject::kResultValuePropertyOffset,
a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
-
- if (done) {
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- }
-
__ mov(result_register(), a0);
- EmitReturnSequence();
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ lw(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&allocated);
}
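
EmitCreateIteratorResult now follows the usual allocate-or-call-runtime shape: attempt an inline new-space allocation, and on failure take the gc_required path into the runtime allocator before rejoining at allocated. A self-contained sketch of the pattern, with stand-in allocators:

    #include <cstddef>
    #include <cstdlib>

    void* TryInlineAllocate(std::size_t) { return nullptr; }  // always "fails" here
    void* RuntimeAllocate(std::size_t size) { return std::malloc(size); }

    void* AllocateResult(std::size_t instance_size) {
      void* obj = TryInlineAllocate(instance_size);  // __ Allocate(..., &gc_required)
      if (obj == nullptr) {                          // the gc_required path
        obj = RuntimeAllocate(instance_size);        // CallRuntime, reload cp
      }
      return obj;                                    // &allocated: fill in fields
    }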
@@ -2280,7 +2268,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(a0, result_register());
- __ li(a2, Operand(key->handle()));
+ __ li(a2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
@@ -2442,7 +2430,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ mov(a1, result_register());
__ pop(a0); // Restore value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+ __ li(a2, Operand(prop->key()->AsLiteral()->value()));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2573,7 +2561,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(a0, result_register()); // Load the value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+ __ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
Handle<Code> ic = is_classic_mode()
@@ -2711,8 +2699,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ li(a2, Operand(cell));
@@ -2852,7 +2839,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2906,8 +2893,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ li(a2, Operand(cell));
@@ -3458,7 +3444,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3890,7 +3876,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
@@ -4565,7 +4551,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ mov(a0, result_register()); // Value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle())); // Name.
+ __ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index c1b4e1e056..896e03007b 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "code-stubs.h"
@@ -326,7 +326,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
__ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
- // Is the string internalized?
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
@@ -1588,8 +1589,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 1c8973fe7e..8109e8a288 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -62,7 +62,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
@@ -87,20 +87,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
- for (int i = 0 ; i < transition_maps_.length(); i++) {
- transition_maps_.at(i)->AddDependentCode(
- DependentCode::kTransitionGroup, code);
- }
- if (graph()->depends_on_empty_array_proto_elements()) {
- isolate()->initial_object_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- isolate()->initial_array_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- }
+ info()->CommitDependencies(code);
}
@@ -256,7 +243,6 @@ bool LCodeGen::GeneratePrologue() {
if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- EnsureSpaceForLazyDeopt();
return !is_aborted();
}
@@ -284,6 +270,7 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
+ EnsureSpaceForLazyDeopt();
return !is_aborted();
}
@@ -574,27 +561,15 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
+ WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
@@ -626,60 +601,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
+
+ // TODO(mstarzinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed from the deoptimizer. Currently
+ // this is only used for the arguments object.
+ if (value == NULL) {
+ int arguments_count = environment->values()->length() - translation_size;
+ translation->BeginArgumentsObject(arguments_count);
+ for (int i = 0; i < arguments_count; ++i) {
+ LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
+ value,
+ environment->HasTaggedValueAt(translation_size + i),
+ environment->HasUint32ValueAt(translation_size + i));
}
+ continue;
}
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
+ environment->HasUint32ValueAt(i));
}
}
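
The simplified WriteTranslation recurses from the innermost environment to the outermost and treats a NULL operand as the marker for a trailing block of arguments-object values. A compact sketch with stand-in types (Env and Sink are not the LEnvironment/Translation API):

    #include <vector>

    struct Value;                        // stand-in for LOperand
    struct Sink {                        // stand-in for Translation
      void BeginArgumentsObject(int) {}
      void Add(const Value*) {}
    };
    struct Env {                         // stand-in for LEnvironment
      const Env* outer;
      int translation_size;              // excludes trailing argument values
      std::vector<const Value*> values;  // nullptr marks the arguments object
    };

    void WriteTranslationSketch(const Env* env, Sink* out) {
      if (env == nullptr) return;
      WriteTranslationSketch(env->outer, out);  // outer frames written first
      for (int i = 0; i < env->translation_size; ++i) {
        const Value* v = env->values[i];
        if (v == nullptr) {              // materialize the arguments object
          int argc =
              static_cast<int>(env->values.size()) - env->translation_size;
          out->BeginArgumentsObject(argc);
          for (int j = 0; j < argc; ++j) {
            out->Add(env->values[env->translation_size + j]);
          }
          continue;
        }
        out->Add(v);
      }
    }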
@@ -687,17 +631,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ bool is_uint32) {
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -744,6 +679,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
+ EnsureSpaceForLazyDeopt();
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
@@ -792,8 +728,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -801,7 +735,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -836,7 +770,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_trap_on_deopt) {
+ if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label skip;
if (cc != al) {
__ Branch(&skip, NegateCondition(cc), src1, src2);
@@ -1140,7 +1074,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ // Record the address of the first unknown OSR value as the place to enter.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
@@ -1176,9 +1111,37 @@ void LCodeGen::DoModI(LModI* instr) {
__ And(result_reg, scratch, divisor - 1);
__ bind(&done);
- } else {
- // TODO(svenpanne) Add right->has_fixed_right_arg() case.
+ } else if (hmod->fixed_right_arg().has_value) {
+ const Register scratch = scratch0();
+ const Register left_reg = ToRegister(instr->left());
+ const Register result_reg = ToRegister(instr->result());
+
+ Register right_reg = EmitLoadRegister(instr->right(), scratch);
+ int32_t divisor = hmod->fixed_right_arg().value;
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ Branch(USE_DELAY_SLOT, &left_is_not_negative,
+ ge, left_reg, Operand(zero_reg));
+ __ subu(result_reg, zero_reg, left_reg);
+ __ And(result_reg, result_reg, divisor - 1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(result_reg, zero_reg, result_reg);
+ }
+
+ __ bind(&left_is_not_negative);
+ __ And(result_reg, left_reg, divisor - 1);
+ __ bind(&done);
+
+ } else {
const Register scratch = scratch0();
const Register left_reg = ToRegister(instr->left());
const Register result_reg = ToRegister(instr->result());
@@ -1216,6 +1179,109 @@ void LCodeGen::DoModI(LModI* instr) {
}
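
The new fixed_right_arg branch in DoModI above reduces the modulus to a mask once the right operand is known to be a power of two, negating around the mask so the result keeps the dividend's sign. In C terms:

    #include <cstdint>

    // The kMinInt dividend is excluded here; the emitted code deoptimizes
    // on the overflow cases instead.
    int32_t ModPowerOfTwo(int32_t left, int32_t divisor) {
      if (left < 0) {
        return -((-left) & (divisor - 1));  // the subu / And / subu sequence
      }
      return left & (divisor - 1);          // the left_is_not_negative path
    }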
+void LCodeGen::EmitSignedIntegerDivisionByConstant(
+ Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment) {
+ ASSERT(!AreAliased(dividend, scratch, at, no_reg));
+ ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+
+ uint32_t divisor_abs = abs(divisor);
+
+ int32_t power_of_2_factor =
+ CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+
+ switch (divisor_abs) {
+ case 0:
+ DeoptimizeIf(al, environment);
+ return;
+
+ case 1:
+ if (divisor > 0) {
+ __ Move(result, dividend);
+ } else {
+ __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
+ DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
+ }
+ // Compute the remainder.
+ __ Move(remainder, zero_reg);
+ return;
+
+ default:
+ if (IsPowerOf2(divisor_abs)) {
+ // Branch and condition free code for integer division by a power
+ // of two.
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (power > 1) {
+ __ sra(scratch, dividend, power - 1);
+ }
+ __ srl(scratch, scratch, 32 - power);
+ __ Addu(scratch, dividend, Operand(scratch));
+ __ sra(result, scratch, power);
+ // Negate if necessary.
+ // We don't need to check for overflow because the case '-1' is
+ // handled separately.
+ if (divisor < 0) {
+ ASSERT(divisor != -1);
+ __ Subu(result, zero_reg, Operand(result));
+ }
+ // Compute the remainder.
+ if (divisor > 0) {
+ __ sll(scratch, result, power);
+ __ Subu(remainder, dividend, Operand(scratch));
+ } else {
+ __ sll(scratch, result, power);
+ __ Addu(remainder, dividend, Operand(scratch));
+ }
+ return;
+ } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
+ // Use magic numbers for a few specific divisors.
+ // Details and proofs can be found in:
+ // - Hacker's Delight, Henry S. Warren, Jr.
+ // - The PowerPC Compiler Writer's Guide
+ // and probably many others.
+ //
+ // We handle
+ // <divisor with magic numbers> * <power of 2>
+ // but not
+ // <divisor with magic numbers> * <other divisor with magic numbers>
+ DivMagicNumbers magic_numbers =
+ DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+ // Branch and condition free code for integer division using magic
+ // numbers.
+ const int32_t M = magic_numbers.M;
+ const int32_t s = magic_numbers.s + power_of_2_factor;
+
+ __ li(scratch, Operand(M));
+ __ mult(dividend, scratch);
+ __ mfhi(scratch);
+ if (M < 0) {
+ __ Addu(scratch, scratch, Operand(dividend));
+ }
+ if (s > 0) {
+ __ sra(scratch, scratch, s);
+ __ mov(scratch, scratch);
+ }
+ __ srl(at, dividend, 31);
+ __ Addu(result, scratch, Operand(at));
+ if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
+ // Compute the remainder.
+ __ li(scratch, Operand(divisor));
+ __ Mul(scratch, result, Operand(scratch));
+ __ Subu(remainder, dividend, Operand(scratch));
+ } else {
+ __ li(scratch, Operand(divisor));
+ __ div(dividend, scratch);
+ __ mfhi(remainder);
+ __ mflo(result);
+ }
+ }
+}
+
+
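
EmitSignedIntegerDivisionByConstant's magic-number path is the standard Hacker's Delight recipe: a high multiply, an additive correction when M is negative, an arithmetic shift, and a final +1 taken from the dividend's sign bit. A C++ mirror of the mult/mfhi/Addu/sra/srl sequence, taking M and s from DivMagicNumberFor as given:

    #include <cstdint>

    int32_t MagicDivide(int32_t n, int32_t M, int32_t s) {
      int32_t q = (int32_t)(((int64_t)M * n) >> 32);  // mult + mfhi
      if (M < 0) q += n;                              // the Addu when M < 0
      q >>= s;                                        // sra by s
      q += (int32_t)((uint32_t)n >> 31);              // +1 for a negative n
      // For a negative divisor the caller negates the quotient afterwards.
      return q;
    }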
void LCodeGen::DoDivI(LDivI* instr) {
const Register left = ToRegister(instr->left());
const Register right = ToRegister(instr->right());
@@ -1246,8 +1312,10 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_min_int);
}
- __ mfhi(result);
- DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mfhi(result);
+ DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ }
__ mflo(result);
}
@@ -1264,6 +1332,70 @@ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
}
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ const Register result = ToRegister(instr->result());
+ const Register left = ToRegister(instr->left());
+ const Register remainder = ToRegister(instr->temp());
+ const Register scratch = scratch0();
+
+ if (instr->right()->IsConstantOperand()) {
+ Label done;
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ if (divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ }
+ EmitSignedIntegerDivisionByConstant(result,
+ left,
+ divisor,
+ remainder,
+ scratch,
+ instr->environment());
+ // We performed a truncating division. Correct the result if necessary.
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(scratch, remainder, Operand(divisor));
+ __ Branch(&done, ge, scratch, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+ } else {
+ Label done;
+ const Register right = ToRegister(instr->right());
+
+ // On MIPS, div is asynchronous: it runs in the background while we
+ // check for special cases.
+ __ div(left, right);
+
+ // Check for x / 0.
+ DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ __ mfhi(remainder);
+ __ mflo(result);
+
+ // We performed a truncating division. Correct the result if necessary.
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(scratch, remainder, Operand(right));
+ __ Branch(&done, ge, scratch, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+ }
+}
+
+
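
Both halves of DoMathFloorOfDiv end with the same fix-up: after a truncating divide, subtract one from the quotient whenever the remainder is non-zero and disagrees in sign with the divisor, which is exactly what the Xor plus Branch(ge) pair tests. As plain C++:

    #include <cstdint>

    // right == 0 and kMinInt / -1 are excluded: the emitted code
    // deoptimizes on those before reaching the correction.
    int32_t FloorDiv(int32_t left, int32_t right) {
      int32_t q = left / right;           // MIPS div truncates toward zero
      int32_t r = left % right;           // mfhi
      if (r != 0 && ((r ^ right) < 0)) {  // the Xor / Branch(ge) test
        q -= 1;                           // round toward negative infinity
      }
      return q;
    }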
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -1543,13 +1675,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->value());
@@ -1577,9 +1702,11 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register map = ToRegister(instr->temp());
Label done;
- // If the object is a smi return the object.
- __ Move(result, input);
- __ JumpIfSmi(input, &done);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi return the object.
+ __ Move(result, input);
+ __ JumpIfSmi(input, &done);
+ }
// If the object is not a value type, return the object.
__ GetObjectType(input, map, map);
@@ -1841,12 +1968,13 @@ int LCodeGen::GetNextEmittedBlock() const {
return -1;
}
-
-void LCodeGen::EmitBranch(int left_block, int right_block,
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
Condition cc, Register src1, const Operand& src2) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
+
int next_block = GetNextEmittedBlock();
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
} else if (left_block == next_block) {
@@ -1861,11 +1989,13 @@ void LCodeGen::EmitBranch(int left_block, int right_block,
}
-void LCodeGen::EmitBranchF(int left_block, int right_block,
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
Condition cc, FPURegister src1, FPURegister src2) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
+
int next_block = GetNextEmittedBlock();
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
} else if (left_block == next_block) {
@@ -1886,19 +2016,16 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ EmitBranch(instr, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero);
+ EmitBranchF(instr, nue, reg, kDoubleRegZero);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
@@ -1906,40 +2033,50 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (type.IsBoolean()) {
ASSERT(!info()->IsStub());
__ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq, reg, Operand(at));
+ EmitBranch(instr, eq, reg, Operand(at));
} else if (type.IsSmi()) {
ASSERT(!info()->IsStub());
- EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ EmitBranch(instr, ne, reg, Operand(zero_reg));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ DoubleRegister dbl_scratch = double_scratch0();
+ __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+ EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(true_label, eq, reg, Operand(at));
+ __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
- __ Branch(false_label, eq, reg, Operand(zero_reg));
- __ JumpIfSmi(reg, true_label);
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ And(at, reg, Operand(kSmiTagMask));
@@ -1953,14 +2090,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
- __ Branch(false_label, ne, at, Operand(zero_reg));
+ __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(instr->TrueLabel(chunk_),
+ ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1969,8 +2107,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
- __ Branch(true_label, ne, at, Operand(zero_reg));
- __ Branch(false_label);
+ __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+ __ Branch(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
@@ -1978,7 +2116,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(true_label, eq, scratch, Operand(SYMBOL_TYPE));
+ __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -1988,14 +2126,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&not_heap_number, ne, map, Operand(at));
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+ __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ ne, dbl_scratch, kDoubleRegZero);
// Falls through if dbl_scratch == 0.
- __ Branch(false_label);
+ __ Branch(instr->FalseLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ }
}
}
}
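
The generic tail of DoBranch walks the expected-input-type set in a fixed order, so the overall truthiness rules it encodes can be read off directly. A toy model of those decisions (the real stub inspects maps and instance types rather than a tag field):

    enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, UNDETECTABLE,
                SPEC_OBJECT, STRING, HEAP_NUMBER };

    struct TaggedValue {
      Type type;
      bool bool_value;
      int smi_value;
      int string_length;
      double number;
    };

    bool ToBooleanSketch(const TaggedValue& v) {
      switch (v.type) {
        case UNDEFINED:    return false;
        case BOOLEAN:      return v.bool_value;
        case NULL_TYPE:    return false;
        case SMI:          return v.smi_value != 0;
        case UNDETECTABLE: return false;             // undetectable -> false
        case SPEC_OBJECT:  return true;              // spec object -> true
        case STRING:       return v.string_length != 0;
        case HEAP_NUMBER:  return v.number != 0 && v.number == v.number;
      }
      return false;  // unseen type: the stub deoptimizes unless already generic
    }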
@@ -2003,7 +2145,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block) {
if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
@@ -2044,18 +2186,14 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
Condition cond = TokenToCondition(instr->op(), false);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2066,10 +2204,10 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
// If a NaN is involved, i.e. the result is unordered,
// jump to false block label.
- __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+ __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
left_reg, right_reg);
- EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+ EmitBranchF(instr, cond, left_reg, right_reg);
} else {
Register cmp_left;
Operand cmp_right = Operand(0);
@@ -2099,7 +2237,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
cmp_right = Operand(ToRegister(right));
}
- EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+ EmitBranch(instr, cond, cmp_left, cmp_right);
}
}
}
@@ -2108,20 +2246,15 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- EmitBranch(true_block, false_block, eq, left, Operand(right));
+ EmitBranch(instr, eq, left, Operand(right));
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- EmitBranch(true_block, false_block, eq, left,
- Operand(instr->hydrogen()->right()));
+ EmitBranch(instr, eq, left, Operand(instr->hydrogen()->right()));
}
@@ -2156,23 +2289,22 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
+ EmitIsObject(reg, temp1, temp2,
+ instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- EmitBranch(true_block, false_block, true_cond, temp2,
+ EmitBranch(instr, true_cond, temp2,
Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
__ GetObjectType(input, temp1, temp1);
return lt;
@@ -2183,25 +2315,21 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
- EmitIsString(reg, temp1, false_label);
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond, temp1,
+ EmitBranch(instr, true_cond, temp1,
Operand(FIRST_NONSTRING_TYPE));
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Register input_reg = EmitLoadRegister(instr->value(), at);
__ And(at, input_reg, kSmiTagMask);
- EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+ EmitBranch(instr, eq, at, Operand(zero_reg));
}
@@ -2209,14 +2337,13 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ And(at, temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+ EmitBranch(instr, ne, at, Operand(zero_reg));
}
@@ -2242,15 +2369,13 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
- EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+ EmitBranch(instr, condition, v0, Operand(zero_reg));
}
@@ -2278,16 +2403,12 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ GetObjectType(input, scratch, scratch);
- EmitBranch(true_block,
- false_block,
+ EmitBranch(instr,
BranchCondition(instr->hydrogen()),
scratch,
Operand(TestType(instr->hydrogen())));
@@ -2310,13 +2431,10 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
Register input = ToRegister(instr->value());
Register scratch = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ lw(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+ EmitBranch(instr, eq, at, Operand(zero_reg));
}
@@ -2392,26 +2510,19 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+ EmitBranch(instr, eq, temp, Operand(class_name));
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
__ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+ EmitBranch(instr, eq, temp, Operand(instr->map()));
}
@@ -2477,10 +2588,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
__ Branch(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
@@ -2634,7 +2744,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), result, Operand(at));
@@ -2668,13 +2778,13 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload.
Register payload = ToRegister(instr->temp());
- __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
}
// Store the value.
- __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
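
Both global-cell hunks above keep the same load/store shape — load the cell handle with li, then reach its value slot through FieldMemOperand — only the type narrows from JSGlobalPropertyCell to the slimmer Cell. FieldMemOperand works on tagged pointers by folding the heap-object tag into the displacement. A minimal standalone sketch of that arithmetic (kHeapObjectTag == 1 as in V8; kValueOffset here is illustrative, not V8's real layout constant):

    #include <cassert>

    enum { kHeapObjectTag = 1, kValueOffset = 4 /* illustrative */ };

    int main() {
      char object[8] = {0};
      char* tagged = object + kHeapObjectTag;  // tagged heap pointer
      // FieldMemOperand(reg, offset) loads from reg + (offset - tag),
      // so the pointer never has to be untagged first:
      assert(tagged + (kValueOffset - kHeapObjectTag) == object + kValueOffset);
      return 0;
    }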
@@ -2732,9 +2842,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
@@ -3497,6 +3607,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label done;
__ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
__ mov(result, input);
+ __ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
__ bind(&done);
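
The added subu completes the negative path of EmitIntegerMathAbs: the branch above falls through for negative inputs, result = 0 - input is computed, and the deoptimization test then catches the single value that two's-complement negation cannot represent. A standalone illustration of that corner case (plain C++, no V8 types):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t input = INT32_MIN;  // 0x80000000
      // What `subu result, zero_reg, input` computes, modulo 2^32:
      int32_t result = static_cast<int32_t>(0u - static_cast<uint32_t>(input));
      // Prints -2147483648: the negation is still negative, which is exactly
      // the condition the DeoptimizeIf above traps on.
      std::printf("%d\n", result);
      return 0;
    }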
@@ -3916,12 +4027,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- if (FLAG_optimize_constructed_arrays) {
- // No cell in a2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ li(a2, Operand(undefined_value));
- }
+ // No cell in a2 for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ li(a2, Operand(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3930,22 +4038,41 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- ASSERT(FLAG_optimize_constructed_arrays);
__ li(a0, Operand(instr->arity()));
__ li(a2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
- bool disable_allocation_sites =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // The elements kind may need to transition to its holey variant;
+ // look at the first (length) argument.
+ __ lw(t1, MemOperand(sp, 0));
+ __ Branch(&packed_case, eq, t1, Operand(zero_reg));
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
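
The arity-1 case now dispatches on the argument value: new Array(0) can keep a packed elements kind, while new Array(n) with n > 0 allocates n unset slots, so the stub for the holey variant of the kind runs instead. A minimal sketch of that decision (names are illustrative, not V8's):

    #include <cassert>

    enum Kind { PACKED, HOLEY };

    // new Array(len): an empty backing store has no holes; a non-empty one
    // starts with every element unset, i.e. a hole.
    Kind KindForSingleArgument(int len) { return len == 0 ? PACKED : HOLEY; }

    int main() {
      assert(KindForSingleArgument(0) == PACKED);  // the packed_case branch
      assert(KindForSingleArgument(5) == HOLEY);   // the holey stub path
      return 0;
    }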
@@ -3989,9 +4116,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
if (!transition.is_null()) {
- if (transition->CanBeDeprecated()) {
- transition_maps_.Add(transition, info()->zone());
- }
__ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
@@ -4011,9 +4135,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
ASSERT(!object.is(value));
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
__ sw(value, FieldMemOperand(object, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4238,9 +4362,9 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ sw(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ Addu(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
@@ -4991,9 +5115,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ }
}
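
DoCheckNonSmi now elides the test entirely when the hydrogen value is statically known to be a heap object; when emitted, the test is a single AND against the tag bit. A standalone sketch of 32-bit smi tagging, assuming kSmiTag == 0 and kSmiTagMask == 1 as on MIPS:

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;

    // Small integers are shifted left one bit and keep a 0 tag bit;
    // heap-object pointers carry a 1 tag bit.
    bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
    intptr_t SmiEncode(int32_t n) { return static_cast<intptr_t>(n) << 1; }

    int main() {
      assert(IsSmi(SmiEncode(42)));  // And(at, input, kSmiTagMask) == 0
      assert(!IsSmi(0x12345679));    // odd value: a tagged heap pointer
      return 0;
    }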
@@ -5042,10 +5168,9 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
__ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(at));
} else {
@@ -5142,11 +5267,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
@@ -5156,6 +5277,80 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Register scratch2 = ToRegister(instr->temp2());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
+ TAG_OBJECT);
+
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(map, constructor);
+ __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+ __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ if (initial_map->inobject_properties() != 0) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ sw(scratch, FieldMemOperand(result, property_offset));
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ li(a0, Operand(Smi::FromInt(instance_size)));
+ __ push(a0);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
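+
The new DoAllocateObject follows the standard inline-allocation shape: attempt a bump-pointer allocation in new-space, jump to the deferred code when it fails, and let the slow path rejoin at deferred->exit() after calling the runtime. A standalone analogue of that structure, with all names illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    struct NewSpace { uint8_t* top; uint8_t* limit; };

    void* AllocateRaw(NewSpace* space, size_t size) {
      if (space->top + size > space->limit) {
        return std::malloc(size);   // "deferred" slow path (runtime call)
      }
      void* result = space->top;    // fast path: just bump the pointer
      space->top += size;
      return result;
    }

    int main() {
      uint8_t buffer[64];
      NewSpace space = { buffer, buffer + sizeof(buffer) };
      void* fast = AllocateRaw(&space, 48);  // fits: bump-pointer path
      void* slow = AllocateRaw(&space, 48);  // does not fit: malloc path
      (void)fast;
      std::free(slow);
      return 0;
    }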
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5323,16 +5518,12 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
Register cmp1 = no_reg;
Operand cmp2 = Operand(no_reg);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
+ Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_),
input,
instr->type_literal(),
cmp1,
@@ -5342,7 +5533,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+ EmitBranch(instr, final_branch_condition, cmp1, cmp2);
}
}
@@ -5455,12 +5646,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq, temp1,
+ EmitBranch(instr, eq, temp1,
Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
@@ -5611,15 +5800,15 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ // Normally we record the first unknown OSR value as the entrypoint to the OSR
+ // code, but if there were none, record the entrypoint here.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index a208c4009a..3d31ef10ba 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -55,8 +55,6 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -80,7 +78,6 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
- // TODO(svenpanne) Use this consistently.
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -151,6 +148,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -171,10 +169,7 @@ class LCodeGen BASE_EMBEDDED {
int additional_offset);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -297,10 +292,7 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
+ bool is_uint32);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -329,13 +321,13 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block,
- int right_block,
+ template<class InstrType>
+ void EmitBranch(InstrType instr,
Condition cc,
Register src1,
const Operand& src2);
- void EmitBranchF(int left_block,
- int right_block,
+ template<class InstrType>
+ void EmitBranchF(InstrType instr,
Condition cc,
FPURegister src1,
FPURegister src2);
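
The branch emitters become templates over the control instruction, so each handler passes its instruction rather than a pair of pre-resolved block ids. The implementation itself is not shown in this hunk; a rough reconstruction of what the register variant plausibly does with the new TrueDestination/FalseDestination accessors (a sketch, not the verbatim source):

    template<class InstrType>
    void LCodeGen::EmitBranch(InstrType instr,
                              Condition cc,
                              Register src1,
                              const Operand& src2) {
      int left_block = instr->TrueDestination(chunk_);
      int right_block = instr->FalseDestination(chunk_);
      int next_block = GetNextEmittedBlock();
      if (right_block == left_block) {
        EmitGoto(left_block);
      } else if (left_block == next_block) {
        // True block falls through; branch only on the false condition.
        __ Branch(chunk_->GetAssemblyLabel(right_block),
                  NegateCondition(cc), src1, src2);
      } else {
        __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
        if (right_block != next_block) {
          __ Branch(chunk_->GetAssemblyLabel(right_block));
        }
      }
    }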
@@ -373,7 +365,8 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
@@ -392,6 +385,17 @@ class LCodeGen BASE_EMBEDDED {
Register source,
int* offset,
AllocationSiteMode mode);
+ // Emit optimized code for integer division.
+ // Inputs are signed.
+ // All registers are clobbered.
+ // If 'remainder' is no_reg, it is not computed.
+ void EmitSignedIntegerDivisionByConstant(Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment);
+
void EnsureSpaceForLazyDeopt();
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
@@ -412,8 +416,6 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index ad39c618ea..638eaa4e8b 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -41,24 +41,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as temporaries and
@@ -81,14 +63,6 @@ void LInstruction::VerifyCall() {
#endif
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -356,8 +330,7 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
+ ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -455,7 +428,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -933,7 +906,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -944,13 +917,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
+ bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
+ needs_arguments_object_materialization = true;
op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
@@ -962,6 +937,21 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ if (needs_arguments_object_materialization) {
+ HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+ ? graph()->GetArgumentsObject()
+ : hydrogen_env->entry()->arguments_object();
+ ASSERT(arguments->IsLinked());
+ for (int i = 1; i < arguments->arguments_count(); ++i) {
+ HValue* value = arguments->arguments_values()->at(i);
+ ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+ LOperand* op = UseAny(value);
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -986,10 +976,13 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
- // deoptimization environment.
+ // deoptimization environment. If the instruction is generic, no
+ // environment is needed since all cases are handled.
Representation rep = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+ !expected.IsGeneric()) {
return AssignEnvironment(result);
}
return result;
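
The condition mirrors the comment's reasoning: a deoptimization environment exists only so the code can bail out on a case it does not handle, and the generic ToBoolean stub handles every input type. Restated as a standalone predicate (illustrative names, not V8's):

    // Whether DoBranch must attach a deopt environment.
    bool NeedsEnvironment(bool is_tagged, bool is_smi, bool is_boolean,
                          bool expected_is_generic) {
      // A tagged value of statically unknown type can reach an unhandled
      // case in specialized ToBoolean code, so a bailout must be possible.
      // The generic path handles all cases and can never deoptimize.
      return is_tagged && !is_smi && !is_boolean && !expected_is_generic;
    }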
@@ -1342,27 +1335,69 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers a0
- // and a1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), a0);
- LOperand* divisor = UseFixed(instr->right(), a1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), v0)));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* div = new(zone()) LDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
} else {
return DoArithmeticT(Token::DIV, instr);
}
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
+bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
+ uint32_t divisor_abs = abs(divisor);
+ // Dividing by 0, 1, and powers of 2 is easy.
+ // Note that IsPowerOf2(0) returns true:
+ ASSERT(IsPowerOf2(0) == true);
+ if (IsPowerOf2(divisor_abs)) return true;
+
+ // We have magic numbers for a few specific divisors.
+ // Details and proofs can be found in:
+ // - Hacker's Delight, Henry S. Warren, Jr.
+ // - The PowerPC Compiler Writer's Guide
+ // and probably many others.
+ //
+ // We handle
+ // <divisor with magic numbers> * <power of 2>
+ // but not
+ // <divisor with magic numbers> * <other divisor with magic numbers>
+ int32_t power_of_2_factor =
+ CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ DivMagicNumbers magic_numbers =
+ DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+ if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
+
+ return false;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ // Only optimize when we have magic numbers for the divisor.
+ // The standard integer division routine is usually slower than
+ // transitioning to the FPU.
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
+ }
return NULL;
}
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegisterOrConstant(right);
+ LOperand* remainder = TempRegister();
+ ASSERT(right->IsConstant() &&
+ HConstant::cast(right)->HasInteger32Value());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+}
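+
The new DoMathFloorOfDiv replaces the previous UNIMPLEMENTED stub: for constant divisors with a known magic number, division compiles to a multiply-high plus a couple of fix-up instructions instead of a div. A standalone example of the truncating form for divisor 3, where M = ceil(2^32 / 3) = 0x55555556 (the flooring variant used by HMathFloorOfDiv needs one further adjustment for negative operands):

    #include <cassert>
    #include <cstdint>

    // Truncating signed division by 3 without a divide instruction.
    int32_t DivBy3(int32_t n) {
      int64_t product = static_cast<int64_t>(0x55555556) * n;  // n * M
      int32_t q = static_cast<int32_t>(product >> 32);         // multiply-high
      return q + (static_cast<uint32_t>(n) >> 31);             // +1 when n < 0
    }

    int main() {
      assert(DivBy3(7) == 2);
      assert(DivBy3(-7) == -2);
      assert(DivBy3(2147483647) == 715827882);
      return 0;
    }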
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
@@ -1378,6 +1413,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
+ } else if (instr->fixed_right_arg().has_value) {
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
+ return AssignEnvironment(DefineAsRegister(mod));
} else {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right),
@@ -1677,13 +1716,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -1880,7 +1912,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
@@ -2279,6 +2311,14 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
+ LAllocateObject* result =
+ new(zone()) LAllocateObject(TempRegister(), TempRegister());
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2451,8 +2491,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
undefined,
instr->inlining_kind(),
instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 50feee0cb7..06d30d03de 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -98,7 +99,6 @@ class LCodeGen;
V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
- V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -137,6 +137,7 @@ class LCodeGen;
V(MathCos) \
V(MathExp) \
V(MathFloor) \
+ V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
@@ -486,17 +487,44 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
virtual bool IsControl() const { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
@@ -623,6 +651,25 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
};
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -1195,7 +1242,7 @@ class LBranch: public LControlInstruction<1, 0> {
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch: public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1208,29 +1255,7 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
@@ -2354,6 +2379,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
@@ -2397,6 +2423,21 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LAllocateObject(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2499,26 +2540,10 @@ class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
- LOsrEntry();
+ LOsrEntry() {}
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2642,6 +2667,9 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index d55451b3ec..47e6ff93c7 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -29,10 +29,11 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "runtime.h"
@@ -85,10 +86,9 @@ void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
li(result, Operand(cell));
- lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ lw(result, FieldMemOperand(result, Cell::kValueOffset));
} else {
li(result, Operand(object));
}
@@ -3203,6 +3203,14 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
}
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+ Branch(not_unique_name, lt, reg, Operand(kIsInternalizedMask));
+ Branch(not_unique_name, gt, reg, Operand(SYMBOL_TYPE));
+}
+
+
// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
@@ -3910,6 +3918,9 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset_from_fp) {
@@ -3948,11 +3959,30 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
}
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
+ lb(t9, MemOperand(t9, 0));
+ beq(t9, zero_reg, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ li(t9, Operand(function));
+
+ bind(&end_profiler_check);
+
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
- stub.GenerateCall(this, function);
+ stub.GenerateCall(this, t9);
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
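
CallApiFunctionAndReturn now receives the raw callback address plus a profiling thunk, and picks between them at run time by loading the one-byte is_profiling flag published by the CPU profiler; when profiling is on, the real callback's address travels to the thunk as its last argument. A standalone analogue of that dispatch (all names illustrative):

    #include <cstdio>

    typedef void (*Callback)();
    typedef void (*Thunk)(Callback real_callback);

    void MyCallback() { std::puts("callback"); }
    void ProfilingThunk(Callback real) { std::puts("profiler thunk"); real(); }

    void Invoke(const bool* is_profiling, Callback cb, Thunk thunk) {
      if (*is_profiling) {
        thunk(cb);  // profiler enabled: route through the logging wrapper
      } else {
        cb();       // profiler disabled: direct call, zero overhead
      }
    }

    int main() {
      bool flag = false;
      Invoke(&flag, MyCallback, ProfilingThunk);  // direct
      flag = true;
      Invoke(&flag, MyCallback, ProfilingThunk);  // via thunk
      return 0;
    }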
@@ -4694,19 +4724,19 @@ void MacroAssembler::InitializeNewString(Register string,
int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
+#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_MIPS)
+#endif // V8_HOST_ARCH_MIPS
}
@@ -5037,7 +5067,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// The argument slots are presumed to have been set up by
// PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 5e6bfbae43..ffae2fd69e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1235,6 +1235,9 @@ class MacroAssembler: public Assembler {
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
void CallApiFunctionAndReturn(ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset_from_fp);
@@ -1403,6 +1406,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* failure);
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+
// Test that both first and second are sequential ASCII strings.
// Assume that they are non-smis.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 2961519af2..7b67a7b47f 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "unicode.h"
#include "log.h"
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index d8a39ab30c..914a758662 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -31,7 +31,7 @@
#include <cstdarg>
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "cpu.h"
#include "disasm.h"
@@ -1394,6 +1394,9 @@ typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
// Here, we pass the first argument in a0, because this function
// does not return a struct.
typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
+ int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
// See comment at SimulatorRuntimeDirectApiCall.
@@ -1402,6 +1405,10 @@ typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
// See comment at SimulatorRuntimeDirectApiCallNew.
typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
int32_t arg1);
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
+ int32_t arg0, int32_t arg1, int32_t arg2);
+typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
+ int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -1571,6 +1578,30 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
target(arg0);
}
} else if (
+ redirection->type() == ExternalReference::PROFILING_API_CALL ||
+ redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
+ if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ // See comment at type definition of SimulatorRuntimeDirectApiCall
+ // for explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg1, arg2);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ v8::Handle<v8::Value> result = target(arg1, arg2);
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
+ set_register(v0, arg0);
+ } else {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCallNew target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
+ target(arg0, arg1);
+ }
+ } else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
@@ -1594,6 +1625,30 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
target(arg0, arg1);
}
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
+ if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ // See comment at type definition of SimulatorRuntimeProfilingGetterCall
+ // for explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x %08x\n",
+ reinterpret_cast<void*>(external), arg1, arg2, arg3);
+ }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ v8::Handle<v8::Value> result = target(arg1, arg2, arg3);
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
+ set_register(v0, arg0);
+ } else {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ }
+ SimulatorRuntimeProfilingGetterCallNew target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(external);
+ target(arg0, arg1, arg2);
+ }
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index a091e5fb20..601cd6d99d 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -316,8 +316,6 @@ class Simulator {
if (instr->InstructionBits() == nopInstr) {
// Short-cut generic nop instructions. They are always valid and they
// never change the simulator state.
- set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstrSize);
return;
}
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index be32744b2e..52211904d9 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "ic-inl.h"
#include "codegen.h"
@@ -420,12 +420,10 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<Name> name,
Register scratch,
Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
+ Handle<Cell> cell = GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ li(scratch, Operand(cell));
- __ lw(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
@@ -505,7 +503,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register storage_reg = name_reg;
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (details.type() == CONSTANT_FUNCTION) {
+ Handle<HeapObject> constant(
+ HeapObject::cast(descriptors->GetValue(descriptor)));
+ __ LoadHeapObject(scratch1, constant);
+ __ Branch(miss_restore_name, ne, value_reg, Operand(scratch1));
+ } else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_restore_name);
@@ -534,7 +537,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (object->map()->unused_property_fields() == 0) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@@ -562,6 +566,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
+ if (details.type() == CONSTANT_FUNCTION) {
+ ASSERT(value_reg.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ return;
+ }
+
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
@@ -934,6 +945,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
!CallbackTable::ReturnsVoid(masm->isolate(), function_address);
Register first_arg = returns_handle ? a1 : a0;
+ Register second_arg = returns_handle ? a2 : a1;
// first_arg = v8::Arguments&
// Arguments is built at sp + 1 (sp is a reserved spot for ra).
@@ -960,8 +972,23 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference(&fun,
type,
masm->isolate());
+
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeInvocationCallback)
+ : FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type =
+ returns_handle ?
+ ExternalReference::PROFILING_API_CALL :
+ ExternalReference::PROFILING_API_CALL_NEW;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
AllowExternalCallThatCantCauseGC scope(masm);
__ CallApiFunctionAndReturn(ref,
+ function_address,
+ thunk_ref,
+ second_arg,
kStackUnwindSpace,
returns_handle,
kFastApiCallArguments + 1);
@@ -1454,6 +1481,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
Register first_arg = returns_handle ? a1 : a0;
Register second_arg = returns_handle ? a2 : a1;
+ Register third_arg = returns_handle ? a3 : a2;
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
__ mov(first_arg, sp); // (first argument - see note below) = Handle<Name>
@@ -1474,14 +1502,28 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Addu(second_arg, sp, kPointerSize);
const int kStackUnwindSpace = kFastApiCallArguments + 1;
+
ApiFunction fun(getter_address);
ExternalReference::Type type =
returns_handle ?
ExternalReference::DIRECT_GETTER_CALL :
ExternalReference::DIRECT_GETTER_CALL_NEW;
-
ExternalReference ref = ExternalReference(&fun, type, isolate());
+
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeAccessorGetter)
+ : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ returns_handle ?
+ ExternalReference::PROFILING_GETTER_CALL :
+ ExternalReference::PROFILING_GETTER_CALL_NEW;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ isolate());
__ CallApiFunctionAndReturn(ref,
+ getter_address,
+ thunk_ref,
+ third_arg,
kStackUnwindSpace,
returns_handle,
5);
@@ -1600,12 +1642,12 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ li(a3, Operand(cell));
- __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+ __ lw(a1, FieldMemOperand(a3, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
@@ -1672,12 +1714,61 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+ // Check that the function is still the Array function.
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+ Register receiver = a1;
+
+ if (cell.is_null()) {
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, a0,
+ t0, name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+ Handle<Cell> kind_feedback_cell =
+ isolate()->factory()->NewCell(kind);
+ __ li(a0, Operand(argc));
+ __ li(a2, Operand(kind_feedback_cell));
+ __ li(a1, Operand(function));
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1922,16 +2013,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -2004,16 +2096,17 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2086,16 +2179,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2167,16 +2261,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2240,16 +2335,17 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2369,16 +2465,17 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2468,7 +2565,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2476,7 +2573,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
@@ -2649,8 +2746,9 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<JSFunction> function) {
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, Handle<String>::cast(name));
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT_FUNCTION);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2709,7 +2807,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
// ----------- S t a t e -------------
@@ -2719,7 +2817,8 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name));
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2885,7 +2984,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<Code> StoreStubCompiler::CompileStoreGlobal(
Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name) {
Label miss;
@@ -2899,13 +2998,11 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// global object. We bail out to the runtime system to do that.
__ li(scratch1(), Operand(cell));
__ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
- __ lw(scratch3(),
- FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
+ __ lw(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
__ Branch(&miss, eq, scratch3(), Operand(scratch2()));
// Store the value in the cell.
- __ sw(value(),
- FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
+ __ sw(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
// Cells are always rescanned, so no write barrier here.
@@ -3027,7 +3124,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
Label success, miss;
@@ -3039,7 +3136,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// Get the value from the cell.
__ li(a3, Operand(cell));
- __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+ __ lw(t0, FieldMemOperand(a3, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index e1fd872f3b..28b8fc81ba 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1509,6 +1509,11 @@ FrameDetails.prototype.scopeCount = function() {
};
+FrameDetails.prototype.stepInPositionsImpl = function() {
+ return %GetStepInPositions(this.break_id_, this.frameId());
+};
+
+
/**
* Mirror object for stack frames.
* @param {number} break_id The break id in the VM for which this frame is
@@ -1669,15 +1674,55 @@ FrameMirror.prototype.scope = function(index) {
};
+FrameMirror.prototype.stepInPositions = function() {
+ var script = this.func().script();
+ var funcOffset = this.func().sourcePosition_();
+
+ var stepInRaw = this.details_.stepInPositionsImpl();
+ var result = [];
+ if (stepInRaw) {
+ for (var i = 0; i < stepInRaw.length; i++) {
+ var posStruct = {};
+ var offset = script.locationFromPosition(funcOffset + stepInRaw[i],
+ true);
+ serializeLocationFields(offset, posStruct);
+ var item = {
+ position: posStruct
+ };
+ result.push(item);
+ }
+ }
+
+ return result;
+};
+
+
FrameMirror.prototype.evaluate = function(source, disable_break,
opt_context_object) {
- var result = %DebugEvaluate(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- source,
- Boolean(disable_break),
- opt_context_object);
- return MakeMirror(result);
+ var result_array = %DebugEvaluate(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ source,
+ Boolean(disable_break),
+ opt_context_object);
+ // Silently ignore local variable changes if the frame is optimized.
+ if (!this.isOptimizedFrame()) {
+ var local_scope_on_stack = result_array[1];
+ var local_scope_modified = result_array[2];
+ for (var n in local_scope_modified) {
+ var value_on_stack = local_scope_on_stack[n];
+ var value_modified = local_scope_modified[n];
+ if (value_on_stack !== value_modified) {
+ %SetScopeVariableValue(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ 0,
+ n,
+ value_modified);
+ }
+ }
+ }
+ return MakeMirror(result_array[0]);
};
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 978ea217bd..a8d9b35f3b 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -172,7 +172,8 @@ class CppByteSink : public PartialSnapshotSink {
int data_space_used,
int code_space_used,
int map_space_used,
- int cell_space_used) {
+ int cell_space_used,
+ int property_cell_space_used) {
fprintf(fp_,
"const int Snapshot::%snew_space_used_ = %d;\n",
prefix,
@@ -197,6 +198,10 @@ class CppByteSink : public PartialSnapshotSink {
"const int Snapshot::%scell_space_used_ = %d;\n",
prefix,
cell_space_used);
+ fprintf(fp_,
+ "const int Snapshot::%sproperty_cell_space_used_ = %d;\n",
+ prefix,
+ property_cell_space_used);
}
void WritePartialSnapshot() {
@@ -307,6 +312,9 @@ int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
+ // Disable the i18n extension, as it doesn't support being snapshotted yet.
+ i::FLAG_enable_i18n = false;
+
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
@@ -387,7 +395,7 @@ int main(int argc, char** argv) {
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
- i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
+ i::Object* raw_context = *v8::Utils::OpenPersistent(context);
context.Dispose(isolate);
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
@@ -417,7 +425,8 @@ int main(int argc, char** argv) {
partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
- partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
+ partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
+ partial_ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
sink.WriteSpaceUsed(
"",
ser.CurrentAllocationAddress(i::NEW_SPACE),
@@ -425,6 +434,7 @@ int main(int argc, char** argv) {
ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
ser.CurrentAllocationAddress(i::CODE_SPACE),
ser.CurrentAllocationAddress(i::MAP_SPACE),
- ser.CurrentAllocationAddress(i::CELL_SPACE));
+ ser.CurrentAllocationAddress(i::CELL_SPACE),
+ ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
return 0;
}
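Annotation: the hunks above thread a new property_cell_space_used counter through WriteSpaceUsed, mirroring the heap's new PROPERTY_CELL_SPACE. A standalone sketch of the generated output (function name, prefix, and sizes here are illustrative):

    #include <cstdio>

    // Emits the two cell-space constants the snapshot sink now writes;
    // previously only cell_space_used existed.
    static void WriteCellSpaces(FILE* fp, const char* prefix,
                                int cell_space_used,
                                int property_cell_space_used) {
      fprintf(fp, "const int Snapshot::%scell_space_used_ = %d;\n",
              prefix, cell_space_used);
      fprintf(fp, "const int Snapshot::%sproperty_cell_space_used_ = %d;\n",
              prefix, property_cell_space_used);
    }

    int main() {
      WriteCellSpaces(stdout, "context_", 128, 64);  // illustrative sizes
      return 0;
    }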
diff --git a/deps/v8/src/natives.h b/deps/v8/src/natives.h
index 5f34420d0b..e3f69d1dae 100644
--- a/deps/v8/src/natives.h
+++ b/deps/v8/src/natives.h
@@ -36,7 +36,7 @@ typedef bool (*NativeSourceCallback)(Vector<const char> name,
int index);
enum NativeType {
- CORE, EXPERIMENTAL, D8, TEST
+ CORE, EXPERIMENTAL, D8, TEST, I18N
};
template <NativeType type>
@@ -61,6 +61,7 @@ class NativesCollection {
typedef NativesCollection<CORE> Natives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
+typedef NativesCollection<I18N> I18NNatives;
} } // namespace v8::internal
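Annotation: the new I18N enumerator gets its own NativesCollection instantiation. A compilable toy of the tag-template pattern natives.h relies on, with per-bundle behavior selected at compile time (the Name() member and its strings are made up for the demo):

    #include <cstdio>

    enum NativeType { CORE, EXPERIMENTAL, D8, TEST, I18N };

    template <NativeType type>
    struct NativesCollection {
      static const char* Name();
    };
    // One explicit specialization per bundle the build embeds.
    template <> const char* NativesCollection<CORE>::Name() { return "core"; }
    template <> const char* NativesCollection<I18N>::Name() { return "i18n"; }

    typedef NativesCollection<I18N> I18NNatives;  // mirrors the new typedef

    int main() { std::printf("%s\n", I18NNatives::Name()); }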
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 4008181bad..c0c0e477bf 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -163,8 +163,11 @@ void HeapObject::HeapObjectVerify() {
case JS_BUILTINS_OBJECT_TYPE:
JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
+ case CELL_TYPE:
+ Cell::cast(this)->CellVerify();
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::cast(this)->PropertyCellVerify();
break;
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify();
@@ -204,6 +207,9 @@ void HeapObject::HeapObjectVerify() {
case JS_TYPED_ARRAY_TYPE:
JSTypedArray::cast(this)->JSTypedArrayVerify();
break;
+ case JS_DATA_VIEW_TYPE:
+ JSDataView::cast(this)->JSDataViewVerify();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -615,9 +621,16 @@ void Oddball::OddballVerify() {
}
-void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
- CHECK(IsJSGlobalPropertyCell());
+void Cell::CellVerify() {
+ CHECK(IsCell());
+ VerifyObjectField(kValueOffset);
+}
+
+
+void PropertyCell::PropertyCellVerify() {
+ CHECK(IsPropertyCell());
VerifyObjectField(kValueOffset);
+ VerifyObjectField(kTypeOffset);
}
@@ -751,8 +764,8 @@ void JSArrayBuffer::JSArrayBufferVerify() {
}
-void JSTypedArray::JSTypedArrayVerify() {
- CHECK(IsJSTypedArray());
+void JSArrayBufferView::JSArrayBufferViewVerify() {
+ CHECK(IsJSArrayBufferView());
JSObjectVerify();
VerifyPointer(buffer());
CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
@@ -764,7 +777,12 @@ void JSTypedArray::JSTypedArrayVerify() {
VerifyPointer(byte_length());
CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
|| byte_length()->IsUndefined());
+}
+
+void JSTypedArray::JSTypedArrayVerify() {
+ CHECK(IsJSTypedArray());
+ JSArrayBufferViewVerify();
VerifyPointer(length());
CHECK(length()->IsSmi() || length()->IsHeapNumber()
|| length()->IsUndefined());
@@ -773,6 +791,12 @@ void JSTypedArray::JSTypedArrayVerify() {
}
+void JSDataView::JSDataViewVerify() {
+ CHECK(IsJSDataView());
+ JSArrayBufferViewVerify();
+}
+
+
void Foreign::ForeignVerify() {
CHECK(IsForeign());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index e60f0f36f1..fe054dad4b 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -221,12 +221,9 @@ bool Object::IsSpecFunction() {
bool Object::IsInternalizedString() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
- // Because the internalized tag is non-zero and no non-string types have the
- // internalized bit set we can test for internalized strings with a very
- // simple test operation.
STATIC_ASSERT(kInternalizedTag != 0);
- ASSERT(kNotStringTag + kIsInternalizedMask > LAST_TYPE);
- return (type & kIsInternalizedMask) != 0;
+ return (type & (kIsNotStringMask | kIsInternalizedMask)) ==
+ (kInternalizedTag | kStringTag);
}
@@ -323,7 +320,8 @@ StringShape::StringShape(InstanceType t)
bool StringShape::IsInternalized() {
ASSERT(valid());
STATIC_ASSERT(kInternalizedTag != 0);
- return (type_ & kIsInternalizedMask) != 0;
+ return (type_ & (kIsNotStringMask | kIsInternalizedMask)) ==
+ (kInternalizedTag | kStringTag);
}
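Annotation: both predicates above switch from testing only the internalized bit to testing the string-ness bits as well. A standalone illustration of the new test; the bit values are invented for the demo and are not V8's actual instance-type encoding:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kIsNotStringMask    = 0x80;  // assumed layout
    const uint32_t kStringTag          = 0x00;
    const uint32_t kIsInternalizedMask = 0x40;
    const uint32_t kInternalizedTag    = 0x40;

    // New form: also requires the string tag, so a non-string type that
    // happens to carry the internalized bit can no longer pass.
    bool IsInternalized(uint32_t type) {
      return (type & (kIsNotStringMask | kIsInternalizedMask)) ==
             (kInternalizedTag | kStringTag);
    }

    int main() {
      std::printf("%d\n", IsInternalized(kInternalizedTag));                     // 1
      std::printf("%d\n", IsInternalized(kIsNotStringMask | kInternalizedTag));  // 0: not a string
    }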
@@ -668,7 +666,8 @@ template <> inline bool Is<JSFunction>(Object* obj) {
TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
+TYPE_CHECKER(Cell, CELL_TYPE)
+TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
@@ -694,6 +693,14 @@ bool Object::IsBoolean() {
TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
+TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+
+
+bool Object::IsJSArrayBufferView() {
+ return IsJSDataView() || IsJSTypedArray();
+}
+
+
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
@@ -1621,17 +1628,28 @@ void Oddball::set_kind(byte value) {
}
-Object* JSGlobalPropertyCell::value() {
+Object* Cell::value() {
return READ_FIELD(this, kValueOffset);
}
-void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
+void Cell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
- ASSERT(!val->IsJSGlobalPropertyCell());
+ ASSERT(!val->IsPropertyCell() && !val->IsCell());
WRITE_FIELD(this, kValueOffset, val);
}
+ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
+
+Object* PropertyCell::type_raw() {
+ return READ_FIELD(this, kTypeOffset);
+}
+
+
+void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) {
+ WRITE_FIELD(this, kTypeOffset, val);
+}
+
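Annotation: a toy model of the object split behind these accessors (layout illustrative, not V8's actual field offsets). Cell carries only a value slot, while PropertyCell adds a type slot and a dependent-code slot so optimized code can register a deoptimization dependency on a global property:

    #include <cstdio>

    struct Object;  // opaque heap value

    struct Cell {
      Object* value;            // corresponds to Cell::kValueOffset
    };

    struct PropertyCell : Cell {
      Object* type;             // corresponds to PropertyCell::kTypeOffset
      Object* dependent_code;   // corresponds to kDependentCodeOffset
    };

    int main() {
      std::printf("Cell: %zu bytes, PropertyCell: %zu bytes\n",
                  sizeof(Cell), sizeof(PropertyCell));
    }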
int JSObject::GetHeaderSize() {
InstanceType type = map()->instance_type();
@@ -1662,6 +1680,8 @@ int JSObject::GetHeaderSize() {
return JSArrayBuffer::kSize;
case JS_TYPED_ARRAY_TYPE:
return JSTypedArray::kSize;
+ case JS_DATA_VIEW_TYPE:
+ return JSDataView::kSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
@@ -2080,30 +2100,6 @@ void FixedArray::set_the_hole(int index) {
}
-void FixedArray::set_unchecked(int index, Smi* value) {
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
-}
-
-
-void FixedArray::set_unchecked(Heap* heap,
- int index,
- Object* value,
- WriteBarrierMode mode) {
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
-}
-
-
-void FixedArray::set_null_unchecked(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
-}
-
-
double* FixedDoubleArray::data_start() {
return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
}
@@ -2552,7 +2548,8 @@ CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(JSGlobalPropertyCell)
+CAST_ACCESSOR(Cell)
+CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(JSFunction)
@@ -2563,7 +2560,9 @@ CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSFunctionProxy)
@@ -3388,15 +3387,13 @@ int Map::pre_allocated_property_fields() {
int HeapObject::SizeFromMap(Map* map) {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
- // We can ignore the "internalized" bit because it is only set for strings
- // and thus implies a string type.
- int instance_type =
- static_cast<int>(map->instance_type()) & ~kIsInternalizedMask;
// Only inline the most frequent cases.
+ int instance_type = static_cast<int>(map->instance_type());
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
- if (instance_type == ASCII_STRING_TYPE) {
+ if (instance_type == ASCII_STRING_TYPE ||
+ instance_type == ASCII_INTERNALIZED_STRING_TYPE) {
return SeqOneByteString::SizeFor(
reinterpret_cast<SeqOneByteString*>(this)->length());
}
@@ -3406,7 +3403,8 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == FREE_SPACE_TYPE) {
return reinterpret_cast<FreeSpace*>(this)->size();
}
- if (instance_type == STRING_TYPE) {
+ if (instance_type == STRING_TYPE ||
+ instance_type == INTERNALIZED_STRING_TYPE) {
return SeqTwoByteString::SizeFor(
reinterpret_cast<SeqTwoByteString*>(this)->length());
}
@@ -3565,11 +3563,6 @@ bool Map::is_dictionary_map() {
}
-JSFunction* Map::unchecked_constructor() {
- return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
-}
-
-
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -3641,6 +3634,9 @@ bool Map::CanBeDeprecated() {
details.representation().IsHeapObject()) {
return true;
}
+ if (FLAG_track_fields && details.type() == CONSTANT_FUNCTION) {
+ return true;
+ }
}
return false;
}
@@ -3659,17 +3655,6 @@ bool Map::CanOmitPrototypeChecks() {
}
-void Map::AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code) {
- Handle<DependentCode> codes =
- DependentCode::Insert(Handle<DependentCode>(dependent_code()),
- group, code);
- if (*codes != dependent_code()) {
- set_dependent_code(*codes);
- }
-}
-
-
int DependentCode::number_of_entries(DependencyGroup group) {
if (length() == 0) return 0;
return Smi::cast(get(group))->value();
@@ -3681,32 +3666,52 @@ void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
}
+bool DependentCode::is_code_at(int i) {
+ return get(kCodesStartIndex + i)->IsCode();
+}
+
Code* DependentCode::code_at(int i) {
return Code::cast(get(kCodesStartIndex + i));
}
-void DependentCode::set_code_at(int i, Code* value) {
- set(kCodesStartIndex + i, value);
+CompilationInfo* DependentCode::compilation_info_at(int i) {
+ return reinterpret_cast<CompilationInfo*>(
+ Foreign::cast(get(kCodesStartIndex + i))->foreign_address());
+}
+
+
+void DependentCode::set_object_at(int i, Object* object) {
+ set(kCodesStartIndex + i, object);
}
-Object** DependentCode::code_slot_at(int i) {
+Object* DependentCode::object_at(int i) {
+ return get(kCodesStartIndex + i);
+}
+
+
+Object** DependentCode::slot_at(int i) {
return HeapObject::RawField(
this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
}
-void DependentCode::clear_code_at(int i) {
+void DependentCode::clear_at(int i) {
set_undefined(kCodesStartIndex + i);
}
+void DependentCode::copy(int from, int to) {
+ set(kCodesStartIndex + to, get(kCodesStartIndex + from));
+}
+
+
void DependentCode::ExtendGroup(DependencyGroup group) {
GroupStartIndexes starts(this);
for (int g = kGroupCount - 1; g > group; g--) {
if (starts.at(g) < starts.at(g + 1)) {
- set_code_at(starts.at(g + 1), code_at(starts.at(g)));
+ copy(starts.at(g), starts.at(g + 1));
}
}
}
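Annotation: a simplified, assumed model of why the accessors above generalize from code_at/set_code_at to object_at/is_code_at/compilation_info_at. An entry can now be either compiled Code or a Foreign wrapping a CompilationInfo* for a not-yet-finished compilation, so callers must check before casting (modeled here with std::variant; requires C++17):

    #include <cstdio>
    #include <variant>
    #include <vector>

    struct Code {};
    struct CompilationInfo {};
    using Entry = std::variant<Code*, CompilationInfo*>;  // models Code-or-Foreign

    struct DependentCodeModel {
      std::vector<Entry> entries;
      bool is_code_at(int i) { return std::holds_alternative<Code*>(entries[i]); }
      Code* code_at(int i) { return std::get<Code*>(entries[i]); }
      CompilationInfo* compilation_info_at(int i) {
        return std::get<CompilationInfo*>(entries[i]);
      }
    };

    int main() {
      Code code;
      CompilationInfo info;
      DependentCodeModel d{{&code, &info}};
      std::printf("is_code_at(0)=%d is_code_at(1)=%d\n",
                  d.is_code_at(0), d.is_code_at(1));  // 1 0
    }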
@@ -4034,7 +4039,12 @@ void Code::set_marked_for_deoptimization(bool flag) {
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
- return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
+ switch (kind) {
+#define CASE(name) case name: return true;
+ IC_KIND_LIST(CASE)
+#undef CASE
+ default: return false;
+ }
}
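Annotation: the is_inline_cache_stub rewrite replaces a numeric-range check with membership generated from IC_KIND_LIST itself, so the predicate stays correct even if IC kinds stop being contiguous in the enum. A self-contained model of the X-macro technique (the kind list below is a made-up subset):

    #include <cstdio>

    #define IC_KIND_LIST(V) V(LOAD_IC) V(STORE_IC) V(CALL_IC)  // illustrative

    enum Kind { FUNCTION, LOAD_IC, STORE_IC, CALL_IC, STUB };

    bool is_inline_cache_stub(Kind kind) {
      switch (kind) {
    #define CASE(name) case name: return true;
        IC_KIND_LIST(CASE)   // expands to one `case ...: return true;` per kind
    #undef CASE
        default: return false;
      }
    }

    int main() {
      std::printf("%d %d\n", is_inline_cache_stub(LOAD_IC),
                  is_inline_cache_stub(STUB));  // 1 0
    }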
@@ -4275,6 +4285,7 @@ FixedArray* Map::GetPrototypeTransitions() {
MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
if (allow_prototype->IsFailure()) return allow_prototype;
+ int old_number_of_transitions = NumberOfProtoTransitions();
#ifdef DEBUG
if (HasPrototypeTransitions()) {
ASSERT(GetPrototypeTransitions() != proto_transitions);
@@ -4282,6 +4293,7 @@ MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
}
#endif
transitions()->SetPrototypeTransitions(proto_transitions);
+ SetNumberOfProtoTransitions(old_number_of_transitions);
return this;
}
@@ -4439,7 +4451,7 @@ ACCESSORS(AllocationSiteInfo, payload, Object, kPayloadOffset)
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
-ACCESSORS(Script, id, Object, kIdOffset)
+ACCESSORS(Script, id, Smi, kIdOffset)
ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, data, Object, kDataOffset)
@@ -4723,11 +4735,6 @@ Code* SharedFunctionInfo::code() {
}
-Code* SharedFunctionInfo::unchecked_code() {
- return reinterpret_cast<Code*>(READ_FIELD(this, kCodeOffset));
-}
-
-
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
@@ -4791,17 +4798,6 @@ BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
}
-int SharedFunctionInfo::code_age() {
- return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask;
-}
-
-
-void SharedFunctionInfo::set_code_age(int code_age) {
- int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
- set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
-}
-
-
int SharedFunctionInfo::ic_age() {
return ICAgeBits::decode(counters());
}
@@ -4905,12 +4901,7 @@ bool JSFunction::IsInRecompileQueue() {
Code* JSFunction::code() {
- return Code::cast(unchecked_code());
-}
-
-
-Code* JSFunction::unchecked_code() {
- return reinterpret_cast<Code*>(
+ return Code::cast(
Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
}
@@ -4955,17 +4946,6 @@ Context* JSFunction::context() {
}
-Object* JSFunction::unchecked_context() {
- return READ_FIELD(this, kContextOffset);
-}
-
-
-SharedFunctionInfo* JSFunction::unchecked_shared() {
- return reinterpret_cast<SharedFunctionInfo*>(
- READ_FIELD(this, kSharedFunctionInfoOffset));
-}
-
-
void JSFunction::set_context(Object* value) {
ASSERT(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
@@ -5270,12 +5250,6 @@ int Code::body_size() {
}
-FixedArray* Code::unchecked_deoptimization_data() {
- return reinterpret_cast<FixedArray*>(
- READ_FIELD(this, kDeoptimizationDataOffset));
-}
-
-
ByteArray* Code::unchecked_relocation_info() {
return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
}
@@ -5331,14 +5305,14 @@ void JSArrayBuffer::set_is_external(bool value) {
ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
-ACCESSORS(JSArrayBuffer, weak_first_array, Object, kWeakFirstArrayOffset)
+ACCESSORS(JSArrayBuffer, weak_first_view, Object, kWeakFirstViewOffset)
-ACCESSORS(JSTypedArray, buffer, Object, kBufferOffset)
-ACCESSORS(JSTypedArray, byte_offset, Object, kByteOffsetOffset)
-ACCESSORS(JSTypedArray, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
+ACCESSORS(JSArrayBufferView, byte_offset, Object, kByteOffsetOffset)
+ACCESSORS(JSArrayBufferView, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSArrayBufferView, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSTypedArray, length, Object, kLengthOffset)
-ACCESSORS(JSTypedArray, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
@@ -5351,12 +5325,6 @@ JSRegExp::Type JSRegExp::TypeTag() {
}
-JSRegExp::Type JSRegExp::TypeTagUnchecked() {
- Smi* smi = Smi::cast(DataAtUnchecked(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
-}
-
-
int JSRegExp::CaptureCount() {
switch (TypeTag()) {
case ATOM:
@@ -5392,13 +5360,6 @@ Object* JSRegExp::DataAt(int index) {
}
-Object* JSRegExp::DataAtUnchecked(int index) {
- FixedArray* fa = reinterpret_cast<FixedArray*>(data());
- int offset = FixedArray::kHeaderSize + index * kPointerSize;
- return READ_FIELD(fa, offset);
-}
-
-
void JSRegExp::SetDataAt(int index, Object* value) {
ASSERT(TypeTag() != NOT_COMPILED);
ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
@@ -5406,18 +5367,6 @@ void JSRegExp::SetDataAt(int index, Object* value) {
}
-void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
- ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
- FixedArray* fa = reinterpret_cast<FixedArray*>(data());
- if (value->IsSmi()) {
- fa->set_unchecked(index, Smi::cast(value));
- } else {
- // We only do this during GC, so we don't need to notify the write barrier.
- fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
- }
-}
-
-
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
#if DEBUG
@@ -6011,13 +5960,13 @@ TypeFeedbackId TypeFeedbackCells::AstId(int index) {
}
-void TypeFeedbackCells::SetCell(int index, JSGlobalPropertyCell* cell) {
+void TypeFeedbackCells::SetCell(int index, Cell* cell) {
set(index * 2, cell);
}
-JSGlobalPropertyCell* TypeFeedbackCells::Cell(int index) {
- return JSGlobalPropertyCell::cast(get(index * 2));
+Cell* TypeFeedbackCells::GetCell(int index) {
+ return Cell::cast(get(index * 2));
}
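Annotation: the accessor is renamed from Cell(index) to GetCell(index) because the class that used to be JSGlobalPropertyCell is now itself named Cell; a member with that name would change the meaning of Cell for unqualified lookup inside TypeFeedbackCells. A minimal, hypothetical sketch of the collision:

    struct Cell {};

    struct TypeFeedbackCells {
      Cell* GetCell(int index);   // fine: the type name Cell stays visible
      // Cell* Cell(int index);   // collides: the member name changes the
      //                          // meaning of `Cell` within this class
    };

    int main() {}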
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 357d984a13..f1616da1aa 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -182,8 +182,11 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_MESSAGE_OBJECT_TYPE:
JSMessageObject::cast(this)->JSMessageObjectPrint(out);
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(out);
+ case CELL_TYPE:
+ Cell::cast(this)->CellPrint(out);
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::cast(this)->PropertyCellPrint(out);
break;
case JS_ARRAY_BUFFER_TYPE:
JSArrayBuffer::cast(this)->JSArrayBufferPrint(out);
@@ -191,6 +194,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_TYPED_ARRAY_TYPE:
JSTypedArray::cast(this)->JSTypedArrayPrint(out);
break;
+ case JS_DATA_VIEW_TYPE:
+ JSDataView::cast(this)->JSDataViewPrint(out);
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
Name::cast(this)->Name##Print(out); \
@@ -533,7 +539,8 @@ static const char* TypeToString(InstanceType type) {
case JS_OBJECT_TYPE: return "JS_OBJECT";
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
case ODDBALL_TYPE: return "ODDBALL";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
+ case CELL_TYPE: return "CELL";
+ case PROPERTY_CELL_TYPE: return "PROPERTY_CELL";
case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
case JS_GENERATOR_OBJECT_TYPE: return "JS_GENERATOR_OBJECT";
case JS_MODULE_TYPE: return "JS_MODULE";
@@ -547,8 +554,9 @@ static const char* TypeToString(InstanceType type) {
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
- case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
+ case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
+ case JS_DATA_VIEW_TYPE: return "JS_DATA_VIEW";
case FOREIGN_TYPE: return "FOREIGN";
case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
@@ -817,7 +825,7 @@ void JSArrayBuffer::JSArrayBufferPrint(FILE* out) {
void JSTypedArray::JSTypedArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSTypedArray");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - buffer =");
buffer()->ShortPrint(out);
PrintF(out, "\n - byte_offset = ");
@@ -831,6 +839,19 @@ void JSTypedArray::JSTypedArrayPrint(FILE* out) {
}
+void JSDataView::JSDataViewPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSDataView");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - buffer =");
+ buffer()->ShortPrint(out);
+ PrintF(out, "\n - byte_offset = ");
+ byte_offset()->ShortPrint(out);
+ PrintF(out, "\n - byte_length = ");
+ byte_length()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
void JSFunction::JSFunctionPrint(FILE* out) {
HeapObject::PrintHeader(out, "Function");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -843,7 +864,7 @@ void JSFunction::JSFunctionPrint(FILE* out) {
PrintF(out, "\n - name = ");
shared()->name()->Print(out);
PrintF(out, "\n - context = ");
- unchecked_context()->ShortPrint(out);
+ context()->ShortPrint(out);
PrintF(out, "\n - literals = ");
literals()->ShortPrint(out);
PrintF(out, "\n - code = ");
@@ -917,8 +938,13 @@ void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
}
-void JSGlobalPropertyCell::JSGlobalPropertyCellPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSGlobalPropertyCell");
+void Cell::CellPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Cell");
+}
+
+
+void PropertyCell::PropertyCellPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "PropertyCell");
}
@@ -1093,8 +1119,8 @@ void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "AllocationSiteInfo");
PrintF(out, " - payload: ");
- if (payload()->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
+ if (payload()->IsCell()) {
+ Cell* cell = Cell::cast(payload());
Object* cell_contents = cell->value();
if (cell_contents->IsSmi()) {
ElementsKind kind = static_cast<ElementsKind>(
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 829eab809f..cfb7d4461f 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -83,6 +83,8 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+ table_.Register(kVisitJSDataView, &VisitJSDataView);
+
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
@@ -108,7 +110,7 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
Heap* heap = map->GetHeap();
STATIC_ASSERT(
- JSArrayBuffer::kWeakFirstArrayOffset ==
+ JSArrayBuffer::kWeakFirstViewOffset ==
JSArrayBuffer::kWeakNextOffset + kPointerSize);
VisitPointers(
heap,
@@ -140,6 +142,22 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(
+ Map* map, HeapObject* object) {
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSDataView::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSDataView::kSize));
+ return JSDataView::kSize;
+}
+
+
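Annotation: VisitJSDataView visits the object's pointer fields in two ranges, [BodyDescriptor::kStartOffset, kWeakNextOffset) and [kWeakNextOffset + kPointerSize, kSize), so the weak_next link in the middle is never treated as a strong reference. A standalone model of that skip-one-weak-slot pattern (field layout illustrative):

    #include <cstdio>

    struct Object;

    struct View {
      Object* buffer;       // strong
      Object* byte_offset;  // strong
      Object* weak_next;    // weak list link: deliberately not visited
      Object* byte_length;  // strong
    };

    // Visits the two strong ranges and skips the weak slot between them.
    static void VisitStrongFields(View* v, void (*visit)(Object**)) {
      visit(&v->buffer);        // first range: [start, weak_next)
      visit(&v->byte_offset);
      visit(&v->byte_length);   // second range: (weak_next, end)
    }

    int main() {
      View v = {};
      VisitStrongFields(&v, [](Object** slot) {
        std::printf("visit slot %p\n", static_cast<void*>(slot));
      });
    }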
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticVisitor,
@@ -194,13 +212,17 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+ table_.Register(kVisitJSDataView, &VisitJSDataView);
+
// Registration for kVisitJSRegExp is done by StaticVisitor.
- table_.Register(kVisitPropertyCell,
+ table_.Register(kVisitCell,
&FixedBodyVisitor<StaticVisitor,
- JSGlobalPropertyCell::BodyDescriptor,
+ Cell::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitPropertyCell, &VisitPropertyCell);
+
table_.template RegisterSpecializations<DataObjectVisitor,
kVisitDataObject,
kVisitDataObjectGeneric>();
@@ -240,10 +262,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitGlobalPropertyCell(
+void StaticMarkingVisitor<StaticVisitor>::VisitCell(
Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell = rinfo->target_cell();
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = rinfo->target_cell();
StaticVisitor::MarkObject(heap, cell);
}
@@ -334,6 +356,30 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ Object** slot =
+ HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
+ if (FLAG_collect_maps) {
+ // Mark the property cell's dependent code array, but do not push it onto
+ // the marking stack; this makes the references from it weak. Dead code
+ // objects are cleaned up when we iterate over property cells in
+ // ClearNonLiveReferences.
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ } else {
+ StaticVisitor::VisitPointer(heap, slot);
+ }
+
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
+}
+
+
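Annotation: VisitPropertyCell marks the dependent-code array "without push": the array itself survives, but because it is never traced, anything reachable only through it dies, which is what makes its references weak. A runnable toy marker demonstrating the distinction (not V8's collector):

    #include <cstdio>
    #include <vector>

    struct Node {
      bool marked = false;
      std::vector<Node*> children;
    };

    // Strong marking: mark and push, so children get traced later.
    static void Mark(Node* n, std::vector<Node*>* stack) {
      if (!n->marked) { n->marked = true; stack->push_back(n); }
    }

    // Mark-without-push: the node survives, but its outgoing references
    // behave weakly because the node itself is never traced.
    static void MarkWithoutPush(Node* n) { n->marked = true; }

    int main() {
      Node dependent_code, dead_code;
      dependent_code.children.push_back(&dead_code);

      std::vector<Node*> stack;
      MarkWithoutPush(&dependent_code);  // as VisitPropertyCell does
      while (!stack.empty()) {           // drain the marking stack (empty here)
        Node* n = stack.back();
        stack.pop_back();
        for (Node* c : n->children) Mark(c, &stack);
      }
      std::printf("dependent_code marked: %d, dead_code marked: %d\n",
                  dependent_code.marked, dead_code.marked);  // 1, 0
    }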
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
@@ -414,7 +460,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
// Visit shared function info immediately to avoid double checking
// of its flushability later. This is just an optimization because
// the shared function info would eventually be visited.
- SharedFunctionInfo* shared = function->unchecked_shared();
+ SharedFunctionInfo* shared = function->shared();
if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
StaticVisitor::MarkObject(heap, shared->map());
VisitSharedFunctionInfoWeakCode(heap, shared);
@@ -451,7 +497,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
Heap* heap = map->GetHeap();
STATIC_ASSERT(
- JSArrayBuffer::kWeakFirstArrayOffset ==
+ JSArrayBuffer::kWeakFirstViewOffset ==
JSArrayBuffer::kWeakNextOffset + kPointerSize);
StaticVisitor::VisitPointers(
heap,
@@ -481,6 +527,21 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(
+ Map* map, HeapObject* object) {
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSDataView::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSDataView::kSize));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
Heap* heap, Map* map) {
// Make sure that the back pointer stored either in the map itself or
@@ -595,22 +656,17 @@ inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
template<typename StaticVisitor>
bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
Heap* heap, JSFunction* function) {
- SharedFunctionInfo* shared_info = function->unchecked_shared();
+ SharedFunctionInfo* shared_info = function->shared();
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
MarkBit code_mark = Marking::MarkBitFrom(function->code());
if (code_mark.Get()) {
- if (!FLAG_age_code) {
- if (!Marking::MarkBitFrom(shared_info).Get()) {
- shared_info->set_code_age(0);
- }
- }
return false;
}
// The function must have a valid context and not be a builtin.
- if (!IsValidNonBuiltinContext(function->unchecked_context())) {
+ if (!IsValidNonBuiltinContext(function->context())) {
return false;
}
@@ -677,20 +733,12 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- if (FLAG_age_code) {
- return shared_info->code()->IsOld();
- } else {
- // How many collections newly compiled code object will survive before being
- // flushed.
- static const int kCodeAgeThreshold = 5;
-
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
- return false;
- }
- return true;
+ // Check the age of the code. If code aging is disabled, we never flush.
+ if (!FLAG_age_code || !shared_info->code()->IsOld()) {
+ return false;
}
+
+ return true;
}
@@ -777,7 +825,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
@@ -801,7 +849,7 @@ template<typename StaticVisitor>
void Code::CodeIterateBody(Heap* heap) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 4bf2804584..6502209798 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -91,7 +91,10 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case CODE_TYPE:
return kVisitCode;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case CELL_TYPE:
+ return kVisitCell;
+
+ case PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
case JS_SET_TYPE:
@@ -140,6 +143,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_TYPED_ARRAY_TYPE:
return kVisitJSTypedArray;
+ case JS_DATA_VIEW_TYPE:
+ return kVisitJSDataView;
+
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index c4d1cc3be1..c2ab45df1d 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -88,12 +88,14 @@ class StaticVisitorBase : public AllStatic {
V(Oddball) \
V(Code) \
V(Map) \
+ V(Cell) \
V(PropertyCell) \
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakMap) \
V(JSArrayBuffer) \
V(JSTypedArray) \
+ V(JSDataView) \
V(JSRegExp)
// For data objects, JS objects and structs along with generic visitor which
@@ -337,6 +339,7 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+ INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
class DataObjectVisitor {
public:
@@ -392,9 +395,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
table_.GetVisitor(map)(map, obj);
}
+ INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
@@ -414,6 +418,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
+ INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
// Mark pointers in a Map and its TransitionArray together, possibly
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 6512c60779..aa678765dd 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -39,6 +39,7 @@
#include "execution.h"
#include "full-codegen.h"
#include "hydrogen.h"
+#include "isolate-inl.h"
#include "objects-inl.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
@@ -47,7 +48,6 @@
#include "safepoint-table.h"
#include "string-stream.h"
#include "utils.h"
-#include "vm-state-inl.h"
#ifdef ENABLE_DISASSEMBLER
#include "disasm.h"
@@ -375,14 +375,8 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
Handle<String> key(String::cast(name));
LOG(isolate, ApiNamedPropertyAccess("load", self, name));
PropertyCallbackArguments args(isolate, data->data(), self, this);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(fun_obj));
- result = args.Call(call_fun, v8::Utils::ToLocal(key));
- }
+ v8::Handle<v8::Value> result =
+ args.Call(call_fun, v8::Utils::ToLocal(key));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) {
return isolate->heap()->undefined_value();
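Annotation: this hunk, and several like it below, drop the inline "Leaving JavaScript" VMState<EXTERNAL> blocks around callback invocations; the state transition presumably now happens once inside PropertyCallbackArguments::Call rather than at every call site. A generic sketch of centralizing such an RAII scope (all names hypothetical):

    // Stand-in for VMState<EXTERNAL>: an RAII scope recording that we are
    // executing external (non-JavaScript) code.
    struct ExternalStateScope {
      ExternalStateScope() { /* record entry into external code */ }
      ~ExternalStateScope() { /* restore the previous state */ }
    };

    // One centralized wrapper replaces a scope block at every call site.
    template <typename Fn>
    auto CallExternal(Fn fn) -> decltype(fn()) {
      ExternalStateScope scope;
      return fn();
    }

    int main() {
      return CallExternal([] { return 0; });
    }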
@@ -629,9 +623,9 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
}
- ASSERT(!value->IsJSGlobalPropertyCell());
+ ASSERT(!value->IsPropertyCell() && !value->IsCell());
return value;
}
@@ -639,9 +633,8 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
ASSERT(!HasFastProperties());
if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
+ PropertyCell* cell = PropertyCell::cast(
+ property_dictionary()->ValueAt(result->GetDictionaryEntry()));
cell->set_value(value);
} else {
property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
@@ -669,8 +662,7 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name,
Object* store_value = value;
if (IsGlobalObject()) {
Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
+ MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value);
if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
Object* dict;
@@ -697,8 +689,8 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name,
details.attributes(), details.type(), enumeration_index);
if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
+ PropertyCell* cell =
+ PropertyCell::cast(property_dictionary()->ValueAt(entry));
cell->set_value(value);
// Please note we have to update the property details.
property_dictionary()->DetailsAtPut(entry, details);
@@ -730,8 +722,7 @@ MaybeObject* JSObject::DeleteNormalizedProperty(Name* name, DeleteMode mode) {
ASSERT(new_map->is_dictionary_map());
set_map(new_map);
}
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
+ PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
cell->set_value(cell->GetHeap()->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
@@ -1385,7 +1376,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
global_object ? "Global Object: " : "",
vowel ? "n" : "");
accumulator->Put(str);
- accumulator->Add(" with %smap 0x%p",
+ accumulator->Add(" with %smap %p",
map_of_this->is_deprecated() ? "deprecated " : "",
map_of_this);
printed = true;
@@ -1567,9 +1558,13 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case FOREIGN_TYPE:
accumulator->Add("<Foreign>");
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case CELL_TYPE:
accumulator->Add("Cell for ");
- JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator);
+ Cell::cast(this)->value()->ShortPrint(accumulator);
+ break;
+ case PROPERTY_CELL_TYPE:
+ accumulator->Add("PropertyCell for ");
+ PropertyCell::cast(this)->value()->ShortPrint(accumulator);
break;
default:
accumulator->Add("<Other heap object (%d)>", map()->instance_type());
@@ -1629,6 +1624,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_ARRAY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
@@ -1661,8 +1657,11 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case CODE_TYPE:
reinterpret_cast<Code*>(this)->CodeIterateBody(v);
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
+ case CELL_TYPE:
+ Cell::BodyDescriptor::IterateBody(this, v);
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::BodyDescriptor::IterateBody(this, v);
break;
case SYMBOL_TYPE:
Symbol::BodyDescriptor::IterateBody(this, v);
@@ -1931,7 +1930,7 @@ MaybeObject* JSObject::AddSlowProperty(Name* name,
int entry = dict->FindEntry(name);
if (entry != NameDictionary::kNotFound) {
store_value = dict->ValueAt(entry);
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
+ PropertyCell::cast(store_value)->set_value(value);
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
@@ -1942,10 +1941,10 @@ MaybeObject* JSObject::AddSlowProperty(Name* name,
}
Heap* heap = GetHeap();
{ MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
+ heap->AllocatePropertyCell(value);
if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
+ PropertyCell::cast(store_value)->set_value(value);
}
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
Object* result;
@@ -2731,18 +2730,13 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
- result = args.Call(setter,
- v8::Utils::ToLocal(name_handle),
- v8::Utils::ToLocal(value_unhole));
- }
+ Handle<Object> value_unhole(value->IsTheHole() ?
+ isolate->heap()->undefined_value() :
+ value,
+ isolate);
+ v8::Handle<v8::Value> result = args.Call(setter,
+ v8::Utils::ToLocal(name_handle),
+ v8::Utils::ToLocal(value_unhole));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
@@ -2841,17 +2835,11 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (call_fun == NULL) return value;
Handle<String> key(String::cast(name));
LOG(isolate, ApiNamedPropertyAccess("store", this, name));
- PropertyCallbackArguments
- args(isolate, data->data(), this, JSObject::cast(holder));
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(call_obj));
- args.Call(call_fun,
- v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- }
+ PropertyCallbackArguments args(
+ isolate, data->data(), this, JSObject::cast(holder));
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value_handle));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
@@ -3267,7 +3255,7 @@ void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
result->NotFound();
return;
}
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
}
// Make sure to disallow caching for uninitialized constants
// found in the dictionary-mode objects.
@@ -4182,12 +4170,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(query, v8::Utils::ToLocal(name_handle));
- }
+ v8::Handle<v8::Integer> result =
+ args.Call(query, v8::Utils::ToLocal(name_handle));
if (!result.IsEmpty()) {
ASSERT(result->IsInt32());
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4197,12 +4181,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(getter, v8::Utils::ToLocal(name_handle));
- }
+ v8::Handle<v8::Value> result =
+ args.Call(getter, v8::Utils::ToLocal(name_handle));
if (!result.IsEmpty()) return DONT_ENUM;
}
return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
@@ -4322,12 +4302,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(query, index);
- }
+ v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
} else if (!interceptor->getter()->IsUndefined()) {
@@ -4335,12 +4310,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(getter, index);
- }
+ v8::Handle<v8::Value> result = args.Call(getter, index);
if (!result.IsEmpty()) return NONE;
}
@@ -4764,7 +4734,7 @@ Object* JSObject::GetHiddenProperty(Name* key) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
// If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+ if (proxy_parent->IsNull()) return GetHeap()->the_hole_value();
ASSERT(proxy_parent->IsJSGlobalObject());
return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
}
@@ -4778,15 +4748,14 @@ Object* JSObject::GetHiddenProperty(Name* key) {
if (key == GetHeap()->identity_hash_string()) {
return inline_value;
} else {
- return GetHeap()->undefined_value();
+ return GetHeap()->the_hole_value();
}
}
- if (inline_value->IsUndefined()) return GetHeap()->undefined_value();
+ if (inline_value->IsUndefined()) return GetHeap()->the_hole_value();
ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
Object* entry = hashtable->Lookup(key);
- if (entry->IsTheHole()) return GetHeap()->undefined_value();
return entry;
}
@@ -5010,12 +4979,8 @@ MaybeObject* JSObject::DeletePropertyWithInterceptor(Name* name) {
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(deleter, v8::Utils::ToLocal(name_handle));
- }
+ v8::Handle<v8::Boolean> result =
+ args.Call(deleter, v8::Utils::ToLocal(name_handle));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
@@ -5046,12 +5011,7 @@ MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(deleter, index);
- }
+ v8::Handle<v8::Boolean> result = args.Call(deleter, index);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
@@ -5879,11 +5839,12 @@ static bool UpdateGetterSetterInDictionary(
}
-MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- switch (GetElementsKind()) {
+void JSObject::DefineElementAccessor(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -5901,21 +5862,21 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array elements.
- return GetHeap()->undefined_value();
+ return;
case DICTIONARY_ELEMENTS:
- if (UpdateGetterSetterInDictionary(element_dictionary(),
+ if (UpdateGetterSetterInDictionary(object->element_dictionary(),
index,
- getter,
- setter,
+ *getter,
+ *setter,
attributes)) {
- return GetHeap()->undefined_value();
+ return;
}
break;
case NON_STRICT_ARGUMENTS_ELEMENTS: {
// Ascertain whether we have read-only properties or an existing
// getter/setter pair in an arguments elements dictionary backing
// store.
- FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
uint32_t length = parameter_map->length();
Object* probe =
index < (length - 2) ? parameter_map->get(index + 2) : NULL;
@@ -5926,10 +5887,10 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
SeededNumberDictionary::cast(arguments);
if (UpdateGetterSetterInDictionary(dictionary,
index,
- getter,
- setter,
+ *getter,
+ *setter,
attributes)) {
- return GetHeap()->undefined_value();
+ return;
}
}
}
@@ -5937,19 +5898,20 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
}
}
- AccessorPair* accessors;
- { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- }
- accessors->SetComponents(getter, setter);
+ Isolate* isolate = object->GetIsolate();
+ Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair();
+ accessors->SetComponents(*getter, *setter);
- return SetElementCallback(index, accessors, attributes);
+ CALL_HEAP_FUNCTION_VOID(
+ isolate, object->SetElementCallback(index, *accessors, attributes));
}
-MaybeObject* JSObject::CreateAccessorPairFor(Name* name) {
- LookupResult result(GetHeap()->isolate());
- LocalLookupRealNamedProperty(name, &result);
+Handle<AccessorPair> JSObject::CreateAccessorPairFor(Handle<JSObject> object,
+ Handle<Name> name) {
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookupRealNamedProperty(*name, &result);
if (result.IsPropertyCallbacks()) {
// Note that the result can actually have IsDontDelete() == true when we
// e.g. have to fall back to the slow case while adding a setter after
@@ -5959,47 +5921,37 @@ MaybeObject* JSObject::CreateAccessorPairFor(Name* name) {
// DefinePropertyAccessor below.
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->Copy();
+ return AccessorPair::Copy(handle(AccessorPair::cast(obj), isolate));
}
}
- return GetHeap()->AllocateAccessorPair();
+ return isolate->factory()->NewAccessorPair();
}
-MaybeObject* JSObject::DefinePropertyAccessor(Name* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
+void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
// We could assert that the property is configurable here, but we would need
// to do a lookup, which seems to be a bit of overkill.
- Heap* heap = GetHeap();
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
- if (HasFastProperties() && !only_attribute_changes &&
- (map()->NumberOfOwnDescriptors() <
+ if (object->HasFastProperties() && !only_attribute_changes &&
+ (object->map()->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors)) {
- MaybeObject* getterOk = heap->undefined_value();
- if (!getter->IsNull()) {
- getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
- if (getterOk->IsFailure()) return getterOk;
- }
-
- MaybeObject* setterOk = heap->undefined_value();
- if (getterOk != heap->null_value() && !setter->IsNull()) {
- setterOk = DefineFastAccessor(name, ACCESSOR_SETTER, setter, attributes);
- if (setterOk->IsFailure()) return setterOk;
- }
-
- if (getterOk != heap->null_value() && setterOk != heap->null_value()) {
- return heap->undefined_value();
- }
+ bool getterOk = getter->IsNull() ||
+ DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
+ bool setterOk = !getterOk || setter->IsNull() ||
+ DefineFastAccessor(object, name, ACCESSOR_SETTER, setter, attributes);
+ if (getterOk && setterOk) return;
}
- AccessorPair* accessors;
- MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+ Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name);
+ accessors->SetComponents(*getter, *setter);
- accessors->SetComponents(getter, setter);
- return SetPropertyCallback(name, accessors, attributes);
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->SetPropertyCallback(*name, *accessors, attributes));
}
@@ -6101,29 +6053,21 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->DefineAccessor(*name, *getter, *setter, attributes));
-}
-
-MaybeObject* JSObject::DefineAccessor(Name* name_raw,
- Object* getter_raw,
- Object* setter_raw,
- PropertyAttributes attributes) {
- Isolate* isolate = GetIsolate();
+ Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ return;
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(
- name_raw, getter_raw, setter_raw, attributes);
+ DefineAccessor(
+ Handle<JSObject>::cast(proto), name, getter, setter, attributes);
+ return;
}
// Make sure that the top context does not change when doing callbacks or
@@ -6131,68 +6075,58 @@ MaybeObject* JSObject::DefineAccessor(Name* name_raw,
AssertNoContextChange ncc;
// Try to flatten before operating on the string.
- if (name_raw->IsString()) String::cast(name_raw)->TryFlatten();
-
- if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value();
+ if (name->IsString()) String::cast(*name)->TryFlatten();
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> getter(getter_raw, isolate);
- Handle<Object> setter(setter_raw, isolate);
+ if (!object->CanSetCallback(*name)) return;
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
bool preexists = false;
if (is_observed) {
if (is_element) {
- preexists = HasLocalElement(index);
- if (preexists && self->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(self, index);
+ preexists = object->HasLocalElement(index);
+ if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
+ old_value = Object::GetElement(object, index);
}
} else {
LookupResult lookup(isolate);
- LocalLookup(*name, &lookup, true);
+ object->LocalLookup(*name, &lookup, true);
preexists = lookup.IsProperty();
if (preexists && lookup.IsDataProperty()) {
- old_value = Object::GetProperty(self, name);
+ old_value = Object::GetProperty(object, name);
}
}
}
- MaybeObject* result = is_element ?
- self->DefineElementAccessor(index, *getter, *setter, attributes) :
- self->DefinePropertyAccessor(*name, *getter, *setter, attributes);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ if (is_element) {
+ DefineElementAccessor(object, index, getter, setter, attributes);
+ } else {
+ DefinePropertyAccessor(object, name, getter, setter, attributes);
+ }
if (is_observed) {
const char* type = preexists ? "reconfigured" : "new";
- EnqueueChangeRecord(self, type, name, old_value);
+ EnqueueChangeRecord(object, type, name, old_value);
}
-
- return *hresult;
}
-static MaybeObject* TryAccessorTransition(JSObject* self,
- Map* transitioned_map,
- int target_descriptor,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
+static bool TryAccessorTransition(JSObject* self,
+ Map* transitioned_map,
+ int target_descriptor,
+ AccessorComponent component,
+ Object* accessor,
+ PropertyAttributes attributes) {
DescriptorArray* descs = transitioned_map->instance_descriptors();
PropertyDetails details = descs->GetDetails(target_descriptor);
// If the transition target was not callbacks, fall back to the slow case.
- if (details.type() != CALLBACKS) return self->GetHeap()->null_value();
+ if (details.type() != CALLBACKS) return false;
Object* descriptor = descs->GetCallbacksObject(target_descriptor);
- if (!descriptor->IsAccessorPair()) return self->GetHeap()->null_value();
+ if (!descriptor->IsAccessorPair()) return false;
Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
PropertyAttributes target_attributes = details.attributes();
@@ -6200,25 +6134,46 @@ static MaybeObject* TryAccessorTransition(JSObject* self,
// Reuse transition if adding same accessor with same attributes.
if (target_accessor == accessor && target_attributes == attributes) {
self->set_map(transitioned_map);
- return self;
+ return true;
}
// If either not the same accessor, or not the same attributes, fall back to
// the slow case.
- return self->GetHeap()->null_value();
+ return false;
}
-MaybeObject* JSObject::DefineFastAccessor(Name* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
+static MaybeObject* CopyInsertDescriptor(Map* map,
+ Name* name,
+ AccessorPair* accessors,
+ PropertyAttributes attributes) {
+ CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
+ return map->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
+}
+
+
+static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ Handle<AccessorPair> accessors,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyInsertDescriptor(*map, *name, *accessors, attributes),
+ Map);
+}
+
+
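The two CopyInsertDescriptor overloads above show the handlification pattern this patch applies throughout objects.cc: a raw MaybeObject-returning core plus a Handle-returning wrapper built on CALL_HEAP_FUNCTION, which retries the allocation after a GC. A generic, self-contained sketch of that shape (the single-retry policy is an assumption for illustration, not the macro's exact behavior):

    // Illustrative only -- not the real V8 macro.
    template <typename T, typename RawCall>
    T* CallHeapFunctionSketch(RawCall raw_call, void (*collect_garbage)()) {
      if (T* result = raw_call()) return result;  // fast path: no failure
      collect_garbage();                          // make room, then retry
      return raw_call();                          // nullptr models Failure
    }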
+bool JSObject::DefineFastAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component,
+ Handle<Object> accessor,
+ PropertyAttributes attributes) {
ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookup(*name, &result);
if (result.IsFound() && !result.IsPropertyCallbacks()) {
- return GetHeap()->null_value();
+ return false;
}
// Return success if the same accessor with the same attributes already exists.
@@ -6228,65 +6183,53 @@ MaybeObject* JSObject::DefineFastAccessor(Name* name,
if (callback_value->IsAccessorPair()) {
source_accessors = AccessorPair::cast(callback_value);
Object* entry = source_accessors->get(component);
- if (entry == accessor && result.GetAttributes() == attributes) {
- return this;
+ if (entry == *accessor && result.GetAttributes() == attributes) {
+ return true;
}
} else {
- return GetHeap()->null_value();
+ return false;
}
int descriptor_number = result.GetDescriptorIndex();
- map()->LookupTransition(this, name, &result);
+ object->map()->LookupTransition(*object, *name, &result);
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
ASSERT(target->NumberOfOwnDescriptors() ==
- map()->NumberOfOwnDescriptors());
+ object->map()->NumberOfOwnDescriptors());
// This works since descriptors are sorted in order of addition.
- ASSERT(map()->instance_descriptors()->GetKey(descriptor_number) == name);
- return TryAccessorTransition(
- this, target, descriptor_number, component, accessor, attributes);
+ ASSERT(object->map()->instance_descriptors()->
+ GetKey(descriptor_number) == *name);
+ return TryAccessorTransition(*object, target, descriptor_number,
+ component, *accessor, attributes);
}
} else {
// If not, look up a transition.
- map()->LookupTransition(this, name, &result);
+ object->map()->LookupTransition(*object, *name, &result);
// If there is a transition, try to follow it.
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
int descriptor_number = target->LastAdded();
ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
- ->Equals(name));
- return TryAccessorTransition(
- this, target, descriptor_number, component, accessor, attributes);
+ ->Equals(*name));
+ return TryAccessorTransition(*object, target, descriptor_number,
+ component, *accessor, attributes);
}
}
// If there is no transition yet, add a transition to a new accessor pair
- // containing the accessor.
- AccessorPair* accessors;
- MaybeObject* maybe_accessors;
-
- // Allocate a new pair if there were no source accessors. Otherwise, copy the
- // pair and modify the accessor.
- if (source_accessors != NULL) {
- maybe_accessors = source_accessors->Copy();
- } else {
- maybe_accessors = GetHeap()->AllocateAccessorPair();
- }
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- accessors->set(component, accessor);
-
- CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
-
- Map* new_map;
- MaybeObject* maybe_new_map =
- map()->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
- return this;
+ // containing the accessor. Allocate a new pair if there were no source
+ // accessors. Otherwise, copy the pair and modify the accessor.
+ Handle<AccessorPair> accessors = source_accessors != NULL
+ ? AccessorPair::Copy(Handle<AccessorPair>(source_accessors))
+ : isolate->factory()->NewAccessorPair();
+ accessors->set(component, *accessor);
+ Handle<Map> new_map = CopyInsertDescriptor(Handle<Map>(object->map()),
+ name, accessors, attributes);
+ object->set_map(*new_map);
+ return true;
}
@@ -6730,6 +6673,11 @@ MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
}
+Handle<Map> Map::Copy(Handle<Map> map) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->Copy(), Map);
+}
+
+
MaybeObject* Map::Copy() {
DescriptorArray* descriptors = instance_descriptors();
DescriptorArray* new_descriptors;
@@ -7861,14 +7809,10 @@ void DescriptorArray::Sort() {
}
-MaybeObject* AccessorPair::Copy() {
- Heap* heap = GetHeap();
- AccessorPair* copy;
- MaybeObject* maybe_copy = heap->AllocateAccessorPair();
- if (!maybe_copy->To(&copy)) return maybe_copy;
-
- copy->set_getter(getter());
- copy->set_setter(setter());
+Handle<AccessorPair> AccessorPair::Copy(Handle<AccessorPair> pair) {
+ Handle<AccessorPair> copy = pair->GetIsolate()->factory()->NewAccessorPair();
+ copy->set_getter(pair->getter());
+ copy->set_setter(pair->setter());
return copy;
}
@@ -8905,8 +8849,8 @@ AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) {
bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) {
ASSERT(kind != NULL);
- if (payload()->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
+ if (payload()->IsCell()) {
+ Cell* cell = Cell::cast(payload());
Object* cell_contents = cell->value();
if (cell_contents->IsSmi()) {
*kind = static_cast<ElementsKind>(
@@ -9181,7 +9125,8 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
void JSFunction::MarkForLazyRecompilation() {
- ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
set_code_no_write_barrier(
@@ -9191,9 +9136,13 @@ void JSFunction::MarkForLazyRecompilation() {
void JSFunction::MarkForParallelRecompilation() {
- ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_parallel_recompilation);
+ if (!FLAG_parallel_recompilation) {
+ JSFunction::MarkForLazyRecompilation();
+ return;
+ }
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Marking ");
PrintName();
@@ -9206,7 +9155,10 @@ void JSFunction::MarkForParallelRecompilation() {
void JSFunction::MarkForInstallingRecompiledCode() {
- ASSERT(is_compiled() && !IsOptimized());
+ // The debugger could have switched the builtin to lazy compile.
+ // In that case, simply carry on. It will be dealt with later.
+ ASSERT(IsInRecompileQueue() || GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(FLAG_parallel_recompilation);
set_code_no_write_barrier(
@@ -9216,7 +9168,10 @@ void JSFunction::MarkForInstallingRecompiledCode() {
void JSFunction::MarkInRecompileQueue() {
- ASSERT(is_compiled() && !IsOptimized());
+ // We can only arrive here via the parallel-recompilation builtin. If
+ // break points were set, the code would point to the lazy-compile builtin.
+ ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(IsMarkedForParallelRecompilation() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(FLAG_parallel_recompilation);
if (FLAG_trace_parallel_recompilation) {
@@ -9395,7 +9350,6 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
bool result = true;
if (function->shared()->is_compiled()) {
function->ReplaceCode(function->shared()->code());
- function->shared()->set_code_age(0);
} else {
ASSERT(function->shared()->allows_lazy_compilation());
CompilationInfoWithZone info(function);
@@ -9435,6 +9389,11 @@ bool JSFunction::IsInlineable() {
}
+void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), object->OptimizeAsPrototype());
+}
+
+
MaybeObject* JSObject::OptimizeAsPrototype() {
if (IsGlobalObject()) return this;
@@ -9980,13 +9939,13 @@ void ObjectVisitor::VisitCodeEntry(Address entry_address) {
}
-void ObjectVisitor::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
VisitPointer(&cell);
if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+ rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
}
}
@@ -10012,12 +9971,18 @@ void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
VisitExternalReferences(p, p + 1);
}
-byte Code::compare_nil_types() {
+byte Code::compare_nil_state() {
ASSERT(is_compare_nil_ic_stub());
return CompareNilICStub::ExtractTypesFromExtraICState(
extended_extra_ic_state());
}
+byte Code::compare_nil_value() {
+ ASSERT(is_compare_nil_ic_stub());
+ return CompareNilICStub::ExtractNilValueFromExtraICState(
+ extended_extra_ic_state());
+}
+
void Code::InvalidateRelocation() {
set_relocation_info(GetHeap()->empty_byte_array());
@@ -10048,7 +10013,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
intptr_t delta = instruction_start() - desc.buffer;
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::kApplyMask;
// Needed to find target_object and runtime_entry on X64
@@ -10059,8 +10024,8 @@ void Code::CopyFrom(const CodeDesc& desc) {
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+ } else if (mode == RelocInfo::CELL) {
+ Handle<Cell> cell = it.rinfo()->target_cell_handle();
it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
@@ -10142,29 +10107,45 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
}
-Map* Code::FindFirstMap() {
+Object* Code::FindNthObject(int n, Map* match_map) {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
- if (object->IsMap()) return Map::cast(object);
+ if (object->IsHeapObject()) {
+ if (HeapObject::cast(object)->map() == match_map) {
+ if (--n == 0) return object;
+ }
+ }
}
return NULL;
}
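FindNthObject generalizes FindFirstMap into an "nth object whose map matches" scan over the relocation info; the counting idiom is just decrement-and-test:

    // Generic form of the counting used by FindNthObject/ReplaceNthObject.
    template <typename It, typename Pred>
    It FindNth(It begin, It end, int n, Pred matches) {
      for (It it = begin; it != end; ++it) {
        if (matches(*it) && --n == 0) return it;
      }
      return end;  // corresponds to the NULL / UNREACHABLE() outcomes
    }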
-void Code::ReplaceFirstMap(Map* replace_with) {
+Map* Code::FindFirstMap() {
+ Object* result = FindNthObject(1, GetHeap()->meta_map());
+ return (result != NULL) ? Map::cast(result) : NULL;
+}
+
+
+void Code::ReplaceNthObject(int n,
+ Map* match_map,
+ Object* replace_with) {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
- if (object->IsMap()) {
- info->set_target_object(replace_with);
- return;
+ if (object->IsHeapObject()) {
+ if (HeapObject::cast(object)->map() == match_map) {
+ if (--n == 0) {
+ info->set_target_object(replace_with);
+ return;
+ }
+ }
}
}
UNREACHABLE();
@@ -10183,6 +10164,11 @@ void Code::FindAllMaps(MapHandleList* maps) {
}
+void Code::ReplaceFirstMap(Map* replace_with) {
+ ReplaceNthObject(1, GetHeap()->meta_map(), replace_with);
+}
+
+
Code* Code::FindFirstCode() {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
@@ -10224,6 +10210,21 @@ Name* Code::FindFirstName() {
}
+void Code::ReplaceNthCell(int n, Cell* replace_with) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (--n == 0) {
+ info->set_target_cell(replace_with);
+ return;
+ }
+ }
+ UNREACHABLE();
+}
+
+
void Code::ClearInlineCaches() {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
@@ -10246,7 +10247,7 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
TypeFeedbackCells* type_feedback_cells =
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
+ Cell* cell = type_feedback_cells->GetCell(i);
cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
}
}
@@ -10299,6 +10300,18 @@ byte* Code::FindCodeAgeSequence() {
}
+int Code::GetAge() {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence == NULL) {
+ return Code::kNoAge;
+ }
+ Age age;
+ MarkingParity parity;
+ GetCodeAgeAndParity(sequence, &age, &parity);
+ return age;
+}
+
+
void Code::GetCodeAgeAndParity(Code* code, Age* age,
MarkingParity* parity) {
Isolate* isolate = Isolate::Current();
@@ -10366,24 +10379,26 @@ void Code::PrintDeoptLocation(int bailout_id) {
}
+bool Code::CanDeoptAt(Address pc) {
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(deoptimization_data());
+ Address code_start_address = instruction_start();
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address address = code_start_address + deopt_data->Pc(i)->value();
+ if (address == pc) return true;
+ }
+ return false;
+}
+
+
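CanDeoptAt above is a linear scan of the deoptimization input data; a self-contained restatement, where the -1 sentinel mirrors Pc(i)->value() == -1:

    #include <cstdint>
    #include <vector>

    bool CanDeoptAtSketch(uintptr_t code_start,
                          const std::vector<intptr_t>& pc_offsets,
                          uintptr_t pc) {
      for (intptr_t offset : pc_offsets) {
        if (offset == -1) continue;  // entry has no recorded deopt pc
        if (code_start + static_cast<uintptr_t>(offset) == pc) return true;
      }
      return false;
    }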
// Identify kind of code.
const char* Code::Kind2String(Kind kind) {
switch (kind) {
- case FUNCTION: return "FUNCTION";
- case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
- case STUB: return "STUB";
- case BUILTIN: return "BUILTIN";
- case LOAD_IC: return "LOAD_IC";
- case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
- case STORE_IC: return "STORE_IC";
- case KEYED_STORE_IC: return "KEYED_STORE_IC";
- case CALL_IC: return "CALL_IC";
- case KEYED_CALL_IC: return "KEYED_CALL_IC";
- case UNARY_OP_IC: return "UNARY_OP_IC";
- case BINARY_OP_IC: return "BINARY_OP_IC";
- case COMPARE_IC: return "COMPARE_IC";
- case COMPARE_NIL_IC: return "COMPARE_NIL_IC";
- case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
+#define CASE(name) case name: return #name;
+ CODE_KIND_LIST(CASE)
+#undef CASE
+ case NUMBER_OF_KINDS: break;
}
UNREACHABLE();
return NULL;
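The rewrite above replaces the hand-maintained switch with an X-macro expansion of CODE_KIND_LIST (the list itself is reworked later in this patch, in objects.h). A minimal standalone demonstration of the technique:

    #include <cstdio>

    #define COLOR_LIST(V) V(RED) V(GREEN) V(BLUE)

    enum Color {
    #define DEFINE_ENUM(name) name,
      COLOR_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
      NUMBER_OF_COLORS
    };

    static const char* ColorToString(Color c) {
      switch (c) {
    #define CASE(name) case name: return #name;
        COLOR_LIST(CASE)
    #undef CASE
        case NUMBER_OF_COLORS: break;
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", ColorToString(GREEN)); }  // prints GREEN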
@@ -10478,9 +10493,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
- case Translation::DUPLICATE:
- break;
-
case Translation::REGISTER: {
int reg_code = iterator.Next();
PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
@@ -10538,11 +10550,8 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
}
case Translation::ARGUMENTS_OBJECT: {
- bool args_known = iterator.Next();
- int args_index = iterator.Next();
int args_length = iterator.Next();
- PrintF(out, "{index=%d, length=%d, known=%d}",
- args_index, args_length, args_known);
+ PrintF(out, "{length=%d}", args_length);
break;
}
}
@@ -11010,63 +11019,61 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
}
-Map* Map::GetPrototypeTransition(Object* prototype) {
- FixedArray* cache = GetPrototypeTransitions();
- int number_of_transitions = NumberOfProtoTransitions();
+Handle<Map> Map::GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype) {
+ FixedArray* cache = map->GetPrototypeTransitions();
+ int number_of_transitions = map->NumberOfProtoTransitions();
const int proto_offset =
kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
const int step = kProtoTransitionElementsPerEntry;
for (int i = 0; i < number_of_transitions; i++) {
- if (cache->get(proto_offset + i * step) == prototype) {
- Object* map = cache->get(map_offset + i * step);
- return Map::cast(map);
+ if (cache->get(proto_offset + i * step) == *prototype) {
+ Object* result = cache->get(map_offset + i * step);
+ return Handle<Map>(Map::cast(result));
}
}
- return NULL;
+ return Handle<Map>();
}
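The cache walked above is a flat FixedArray: a short header followed by (prototype, map) slot pairs. A self-contained sketch of the lookup, with the empty handle modeled as -1:

    #include <vector>

    // Two slots per entry, [prototype, map], preceded by a header.
    const int kHeader = 1, kStep = 2, kProtoOffset = 0, kMapOffset = 1;

    int FindPrototypeTransition(const std::vector<const void*>& cache,
                                int number_of_transitions,
                                const void* prototype) {
      for (int i = 0; i < number_of_transitions; i++) {
        if (cache[kHeader + i * kStep + kProtoOffset] == prototype) {
          return kHeader + i * kStep + kMapOffset;  // slot holding the map
        }
      }
      return -1;  // models returning an empty Handle<Map>
    }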
-MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
- ASSERT(map->IsMap());
- ASSERT(HeapObject::cast(prototype)->map()->IsMap());
+Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map) {
+ ASSERT(target_map->IsMap());
+ ASSERT(HeapObject::cast(*prototype)->map()->IsMap());
// Don't cache prototype transition if this map is shared.
- if (is_shared() || !FLAG_cache_prototype_transitions) return this;
-
- FixedArray* cache = GetPrototypeTransitions();
+ if (map->is_shared() || !FLAG_cache_prototype_transitions) return map;
const int step = kProtoTransitionElementsPerEntry;
const int header = kProtoTransitionHeaderSize;
+ Handle<FixedArray> cache(map->GetPrototypeTransitions());
int capacity = (cache->length() - header) / step;
-
- int transitions = NumberOfProtoTransitions() + 1;
+ int transitions = map->NumberOfProtoTransitions() + 1;
if (transitions > capacity) {
- if (capacity > kMaxCachedPrototypeTransitions) return this;
+ if (capacity > kMaxCachedPrototypeTransitions) return map;
- FixedArray* new_cache;
// Grow array by factor 2 over and above what we need.
- { MaybeObject* maybe_cache =
- GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
- if (!maybe_cache->To(&new_cache)) return maybe_cache;
- }
+ Factory* factory = map->GetIsolate()->factory();
+ cache = factory->CopySizeFixedArray(cache, transitions * 2 * step + header);
- for (int i = 0; i < capacity * step; i++) {
- new_cache->set(i + header, cache->get(i + header));
- }
- cache = new_cache;
- MaybeObject* set_result = SetPrototypeTransitions(cache);
- if (set_result->IsFailure()) return set_result;
+ CALL_AND_RETRY_OR_DIE(map->GetIsolate(),
+ map->SetPrototypeTransitions(*cache),
+ break,
+ return Handle<Map>());
}
- int last = transitions - 1;
+ // Reload number of transitions as GC might shrink them.
+ int last = map->NumberOfProtoTransitions();
+ int entry = header + last * step;
- cache->set(header + last * step + kProtoTransitionPrototypeOffset, prototype);
- cache->set(header + last * step + kProtoTransitionMapOffset, map);
- SetNumberOfProtoTransitions(transitions);
+ cache->set(entry + kProtoTransitionPrototypeOffset, *prototype);
+ cache->set(entry + kProtoTransitionMapOffset, *target_map);
+ map->SetNumberOfProtoTransitions(transitions);
- return cache;
+ return map;
}
@@ -11091,6 +11098,24 @@ void Map::ZapPrototypeTransitions() {
}
+void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info) {
+ Handle<DependentCode> dep(dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, group, info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
+}
+
+
+void Map::AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()), group, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
Recompute(entries);
}
@@ -11105,15 +11130,25 @@ void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) {
}
+DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
+ DependencyGroup group) {
+ AllowDeferredHandleDereference dependencies_are_safe;
+ if (group == DependentCode::kPropertyCellChangedGroup) {
+ return Handle<PropertyCell>::cast(object)->dependent_code();
+ }
+ return Handle<Map>::cast(object)->dependent_code();
+}
+
+
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
- Handle<Code> value) {
+ Handle<Object> object) {
GroupStartIndexes starts(*entries);
int start = starts.at(group);
int end = starts.at(group + 1);
int number_of_entries = starts.number_of_entries();
- if (start < end && entries->code_at(end - 1) == *value) {
- // Do not append the code if it is already in the array.
+ if (start < end && entries->object_at(end - 1) == *object) {
+ // Do not append the compilation info if it is already in the array.
// It is sufficient to check only the last element because
// we process embedded maps of an optimized code object in one batch.
return entries;
@@ -11130,7 +11165,7 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
end = starts.at(group + 1);
number_of_entries = starts.number_of_entries();
for (int i = 0; i < number_of_entries; i++) {
- entries->clear_code_at(i);
+ entries->clear_at(i);
}
// If the old fixed array was empty, we need to reset counters of the
// new array.
@@ -11142,17 +11177,79 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
entries = new_entries;
}
entries->ExtendGroup(group);
- entries->set_code_at(end, *value);
+ entries->set_object_at(end, *object);
entries->set_number_of_entries(group, end + 1 - start);
return entries;
}
+void DependentCode::UpdateToFinishedCode(DependencyGroup group,
+ CompilationInfo* info,
+ Code* code) {
+ DisallowHeapAllocation no_gc;
+ AllowDeferredHandleDereference get_object_wrapper;
+ Foreign* info_wrapper = *info->object_wrapper();
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == info_wrapper) {
+ set_object_at(i, code);
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ for (int i = start; i < end; i++) {
+ ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+ }
+#endif
+}
+
+
+void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info) {
+ DisallowHeapAllocation no_allocation;
+ AllowDeferredHandleDereference get_object_wrapper;
+ Foreign* info_wrapper = *info->object_wrapper();
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ // Find compilation info wrapper.
+ int info_pos = -1;
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == info_wrapper) {
+ info_pos = i;
+ break;
+ }
+ }
+ if (info_pos == -1) return; // Not found.
+ int gap = info_pos;
+ // Use the last of each group to fill the gap in the previous group.
+ for (int i = group; i < kGroupCount; i++) {
+ int last_of_group = starts.at(i + 1) - 1;
+ ASSERT(last_of_group >= gap);
+ if (last_of_group == gap) continue;
+ copy(last_of_group, gap);
+ gap = last_of_group;
+ }
+ ASSERT(gap == starts.number_of_entries() - 1);
+ clear_at(gap); // Clear last gap.
+ set_number_of_entries(group, end - start - 1);
+
+#ifdef DEBUG
+ for (int i = start; i < end - 1; i++) {
+ ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+ }
+#endif
+}
+
+
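The compaction in RemoveCompilationInfo is the subtle part: since the groups are laid out contiguously, one hole can be plugged by cascading the last element of each subsequent group downward. A self-contained sketch of just that loop:

    #include <cassert>
    #include <vector>

    // Group g occupies [starts[g], starts[g + 1]); starts has a sentinel end.
    void RemoveEntrySketch(std::vector<int>& entries,
                           const std::vector<int>& starts,
                           int group, int gap) {
      int group_count = static_cast<int>(starts.size()) - 1;
      for (int g = group; g < group_count; g++) {
        int last_of_group = starts[g + 1] - 1;
        assert(last_of_group >= gap);
        if (last_of_group == gap) continue;
        entries[gap] = entries[last_of_group];  // copy(last_of_group, gap)
        gap = last_of_group;
      }
      entries[gap] = 0;  // clear_at(gap): zap the now-unused last slot
    }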
bool DependentCode::Contains(DependencyGroup group, Code* code) {
GroupStartIndexes starts(this);
- int number_of_entries = starts.at(kGroupCount);
+ int number_of_entries = starts.number_of_entries();
for (int i = 0; i < number_of_entries; i++) {
- if (code_at(i) == code) return true;
+ if (object_at(i) == code) return true;
}
return false;
}
@@ -11173,20 +11270,25 @@ void DependentCode::DeoptimizeDependentCodeGroup(
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
- int number_of_entries = starts.at(DependentCode::kGroupCount);
+ int code_entries = starts.number_of_entries();
if (start == end) return;
for (int i = start; i < end; i++) {
- Code* code = code_at(i);
- code->set_marked_for_deoptimization(true);
+ if (is_code_at(i)) {
+ Code* code = code_at(i);
+ code->set_marked_for_deoptimization(true);
+ } else {
+ CompilationInfo* info = compilation_info_at(i);
+ info->AbortDueToDependencyChange();
+ }
}
// Compact the array by moving all subsequent groups to fill in the new holes.
- for (int src = end, dst = start; src < number_of_entries; src++, dst++) {
- set_code_at(dst, code_at(src));
+ for (int src = end, dst = start; src < code_entries; src++, dst++) {
+ copy(src, dst);
}
// Now the holes are at the end of the array, zap them for heap-verifier.
int removed = end - start;
- for (int i = number_of_entries - removed; i < number_of_entries; i++) {
- clear_code_at(i);
+ for (int i = code_entries - removed; i < code_entries; i++) {
+ clear_at(i);
}
set_number_of_entries(group, 0);
DeoptimizeDependentCodeFilter filter;
@@ -11194,13 +11296,14 @@ void DependentCode::DeoptimizeDependentCodeGroup(
}
-MaybeObject* JSReceiver::SetPrototype(Object* value,
+Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
- int size = Size();
+ int size = object->Size();
#endif
- Isolate* isolate = GetIsolate();
+ Isolate* isolate = object->GetIsolate();
Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
@@ -11214,70 +11317,64 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// Implementation specific extensions that modify [[Class]], [[Prototype]]
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
- if (!this->map()->is_extensible()) {
- HandleScope scope(isolate);
- Handle<Object> handle(this, isolate);
- return isolate->Throw(
- *isolate->factory()->NewTypeError("non_extensible_proto",
- HandleVector<Object>(&handle, 1)));
+ if (!object->map()->is_extensible()) {
+ Handle<Object> args[] = { object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "non_extensible_proto", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// Before we can set the prototype we need to be sure
// prototype cycles are prevented.
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
- for (Object* pt = value;
+ for (Object* pt = *value;
pt != heap->null_value();
pt = pt->GetPrototype(isolate)) {
- if (JSReceiver::cast(pt) == this) {
+ if (JSReceiver::cast(pt) == *object) {
// Cycle detected.
- HandleScope scope(isolate);
- return isolate->Throw(
- *isolate->factory()->NewError("cyclic_proto",
- HandleVector<Object>(NULL, 0)));
+ Handle<Object> error = isolate->factory()->NewError(
+ "cyclic_proto", HandleVector<Object>(NULL, 0));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- JSReceiver* real_receiver = this;
+ Handle<JSObject> real_receiver = object;
if (skip_hidden_prototypes) {
// Find the first object in the chain whose prototype object is not
// hidden and set the new prototype on that object.
Object* current_proto = real_receiver->GetPrototype();
while (current_proto->IsJSObject() &&
- JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
- real_receiver = JSReceiver::cast(current_proto);
+ JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+ real_receiver = handle(JSObject::cast(current_proto), isolate);
current_proto = current_proto->GetPrototype(isolate);
}
}
// Set the new prototype of the object.
- Map* map = real_receiver->map();
+ Handle<Map> map(real_receiver->map());
// Nothing to do if prototype is already set.
- if (map->prototype() == value) return value;
+ if (map->prototype() == *value) return value;
if (value->IsJSObject()) {
- MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
- if (ok->IsFailure()) return ok;
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
- Map* new_map = map->GetPrototypeTransition(value);
- if (new_map == NULL) {
- MaybeObject* maybe_new_map = map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- MaybeObject* maybe_new_cache =
- map->PutPrototypeTransition(value, new_map);
- if (maybe_new_cache->IsFailure()) return maybe_new_cache;
-
- new_map->set_prototype(value);
+ Handle<Map> new_map = Map::GetPrototypeTransition(map, value);
+ if (new_map.is_null()) {
+ new_map = Map::Copy(map);
+ Map::PutPrototypeTransition(map, value, new_map);
+ new_map->set_prototype(*value);
}
- ASSERT(new_map->prototype() == value);
- real_receiver->set_map(new_map);
+ ASSERT(new_map->prototype() == *value);
+ real_receiver->set_map(*new_map);
heap->ClearInstanceofCache();
- ASSERT(size == Size());
+ ASSERT(size == object->Size());
return value;
}
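The cycle check near the top of SetPrototype walks the prospective prototype chain and refuses if the receiver shows up; a minimal standalone version:

    struct Obj { Obj* prototype; };  // null plays the role of heap->null_value()

    bool WouldCreateCycle(Obj* receiver, Obj* new_prototype) {
      for (Obj* pt = new_prototype; pt != nullptr; pt = pt->prototype) {
        if (pt == receiver) return true;  // cycle detected
      }
      return false;
    }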
@@ -11363,12 +11460,8 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(setter, index, v8::Utils::ToLocal(value_handle));
- }
+ v8::Handle<v8::Value> result =
+ args.Call(setter, index, v8::Utils::ToLocal(value_handle));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
@@ -11406,12 +11499,7 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
PropertyCallbackArguments
args(isolate, data->data(), *self, *holder_handle);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(call_fun, v8::Utils::ToLocal(key));
- }
+ v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return isolate->heap()->undefined_value();
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -11473,13 +11561,9 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
PropertyCallbackArguments
args(isolate, data->data(), *self, *holder_handle);
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- args.Call(call_fun,
- v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- }
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value_handle));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
@@ -11958,7 +12042,7 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
StrictModeFlag strict_mode,
SetPropertyMode set_mode) {
if (object->HasExternalArrayElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number = Execution::ToNumber(value, &has_exception);
if (has_exception) return Handle<Object>();
@@ -12236,8 +12320,8 @@ MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) {
return payload->TransitionElementsKind(to_kind);
}
}
- } else if (info->payload()->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(info->payload());
+ } else if (info->payload()->IsCell()) {
+ Cell* cell = Cell::cast(info->payload());
Object* cell_contents = cell->value();
if (cell_contents->IsSmi()) {
ElementsKind kind = static_cast<ElementsKind>(
@@ -12379,12 +12463,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
PropertyCallbackArguments
args(isolate, interceptor->data(), receiver, this);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(getter, index);
- }
+ v8::Handle<v8::Value> result = args.Call(getter, index);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -12689,12 +12768,8 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
PropertyCallbackArguments
args(isolate, interceptor->data(), receiver, this);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- result = args.Call(getter, v8::Utils::ToLocal(name_handle));
- }
+ v8::Handle<v8::Value> result =
+ args.Call(getter, v8::Utils::ToLocal(name_handle));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
*attributes = NONE;
@@ -14159,20 +14234,20 @@ MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
}
-JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
+PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- return JSGlobalPropertyCell::cast(value);
+ return PropertyCell::cast(value);
}
-Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
+Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
Handle<GlobalObject> global,
Handle<Name> name) {
Isolate* isolate = global->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
global->EnsurePropertyCell(*name),
- JSGlobalPropertyCell);
+ PropertyCell);
}
@@ -14183,7 +14258,7 @@ MaybeObject* GlobalObject::EnsurePropertyCell(Name* name) {
Heap* heap = GetHeap();
Object* cell;
{ MaybeObject* maybe_cell =
- heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
+ heap->AllocatePropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
PropertyDetails details(NONE, NORMAL, 0);
@@ -14197,7 +14272,7 @@ MaybeObject* GlobalObject::EnsurePropertyCell(Name* name) {
return cell;
} else {
Object* value = property_dictionary()->ValueAt(entry);
- ASSERT(value->IsJSGlobalPropertyCell());
+ ASSERT(value->IsPropertyCell());
return value;
}
}
@@ -14980,8 +15055,8 @@ Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
if (Dictionary<Shape, Key>::IsKey(k)) {
Object* e = ValueAt(i);
- if (e->IsJSGlobalPropertyCell()) {
- e = JSGlobalPropertyCell::cast(e)->value();
+ if (e->IsPropertyCell()) {
+ e = PropertyCell::cast(e)->value();
}
if (e == value) return k;
}
@@ -15703,11 +15778,51 @@ void JSArrayBuffer::Neuter() {
}
-void JSTypedArray::Neuter() {
+void JSArrayBufferView::NeuterView() {
set_byte_offset(Smi::FromInt(0));
set_byte_length(Smi::FromInt(0));
+}
+
+
+void JSDataView::Neuter() {
+ NeuterView();
+}
+
+
+void JSTypedArray::Neuter() {
+ NeuterView();
set_length(Smi::FromInt(0));
set_elements(GetHeap()->EmptyExternalArrayForMap(map()));
}
+
+Type* PropertyCell::type() {
+ return static_cast<Type*>(type_raw());
+}
+
+
+void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
+ set_type_raw(type, ignored);
+}
+
+
+void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
+ Handle<DependentCode> dep(dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, DependentCode::kPropertyCellChangedGroup,
+ info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add(
+ Handle<HeapObject>(this), info->zone());
+}
+
+
+void PropertyCell::AddDependentCode(Handle<Code> code) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()),
+ DependentCode::kPropertyCellChangedGroup, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 1ee31b6dee..416ed7fcda 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -32,6 +32,7 @@
#include "assert-scope.h"
#include "builtins.h"
#include "elements-kind.h"
+#include "flags.h"
#include "list.h"
#include "property-details.h"
#include "smart-pointers.h"
@@ -58,7 +59,9 @@
// - JSObject
// - JSArray
// - JSArrayBuffer
-// - JSTypedArray
+// - JSArrayBufferView
+// - JSTypedArray
+// - JSDataView
// - JSSet
// - JSMap
// - JSWeakMap
@@ -119,6 +122,8 @@
// - ExternalTwoByteInternalizedString
// - Symbol
// - HeapNumber
+// - Cell
+// - PropertyCell
// - Code
// - Map
// - Oddball
@@ -348,7 +353,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(MAP_TYPE) \
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
+ V(CELL_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
V(BOX_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
@@ -405,6 +411,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE) \
V(JS_PROXY_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_REGEXP_TYPE) \
@@ -569,8 +576,7 @@ const uint32_t kStringTag = 0x0;
const uint32_t kNotStringTag = 0x80;
// Bit 6 indicates that the object is an internalized string (if set) or not.
-// There are not enough types that the non-string types (with bit 7 set) can
-// have bit 6 set too.
+// Bit 7 has to be clear as well.
const uint32_t kIsInternalizedMask = 0x40;
const uint32_t kNotInternalizedTag = 0x0;
const uint32_t kInternalizedTag = 0x40;
@@ -669,7 +675,8 @@ enum InstanceType {
MAP_TYPE,
CODE_TYPE,
ODDBALL_TYPE,
- JS_GLOBAL_PROPERTY_CELL_TYPE,
+ CELL_TYPE,
+ PROPERTY_CELL_TYPE,
BOX_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
@@ -740,6 +747,7 @@ enum InstanceType {
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
+ JS_DATA_VIEW_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
JS_WEAK_MAP_TYPE,
@@ -841,6 +849,7 @@ class Failure;
class FixedArrayBase;
class ObjectVisitor;
class StringStream;
+class Type;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -988,7 +997,9 @@ class MaybeObject BASE_EMBEDDED {
V(Boolean) \
V(JSArray) \
V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
V(JSTypedArray) \
+ V(JSDataView) \
V(JSProxy) \
V(JSFunctionProxy) \
V(JSSet) \
@@ -1011,7 +1022,8 @@ class MaybeObject BASE_EMBEDDED {
V(JSGlobalProxy) \
V(UndetectableObject) \
V(AccessCheckNeeded) \
- V(JSGlobalPropertyCell) \
+ V(Cell) \
+ V(PropertyCell) \
V(ObjectHashTable) \
@@ -1692,10 +1704,6 @@ class JSReceiver: public HeapObject {
// Return the constructor function (may be Heap::null_value()).
inline Object* GetConstructor();
- // Set the object's prototype (only JSReceiver and null are allowed).
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
- bool skip_hidden_prototypes);
-
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet and OMIT_CREATION was used.
inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
@@ -1897,6 +1905,7 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(Name* name,
DeleteMode mode);
+ static void OptimizeAsPrototype(Handle<JSObject> object);
MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
// Retrieve interceptors.
@@ -1924,19 +1933,7 @@ class JSObject: public JSReceiver {
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* DefineAccessor(Name* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
- // Try to define a single accessor paying attention to map transitions.
- // Returns a JavaScript null if this was not possible and we have to use the
- // slow case. Note that we can fail due to allocations, too.
- MUST_USE_RESULT MaybeObject* DefineFastAccessor(
- Name* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes);
+
Object* LookupAccessor(Name* name, AccessorComponent component);
MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
@@ -1984,7 +1981,7 @@ class JSObject: public JSReceiver {
Handle<Object> value);
// Returns a failure if a GC is required.
MUST_USE_RESULT MaybeObject* SetHiddenProperty(Name* key, Object* value);
- // Gets the value of a hidden property with the given key. Returns undefined
+ // Gets the value of a hidden property with the given key. Returns the hole
// if the property doesn't exist (or if called on a detached proxy),
// otherwise returns the value set for the key.
Object* GetHiddenProperty(Name* key);
@@ -2300,6 +2297,11 @@ class JSObject: public JSReceiver {
WriteBarrierMode mode
= UPDATE_WRITE_BARRIER);
+ // Set the object's prototype (only JSReceiver and null are allowed values).
+ static Handle<Object> SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
+ bool skip_hidden_prototypes = false);
+
// Initializes the body after properties slot, properties slot is
// initialized by set_properties. Fill the pre-allocated fields with
// pre_allocated_value and the rest with filler_value.
@@ -2502,18 +2504,26 @@ class JSObject: public JSReceiver {
Name* name,
Object* structure,
PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* DefineElementAccessor(
- uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* CreateAccessorPairFor(Name* name);
- MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
- Name* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
+ static void DefineElementAccessor(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
+ static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object,
+ Handle<Name> name);
+ static void DefinePropertyAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
+ // Try to define a single accessor paying attention to map transitions.
+ // Returns false if this was not possible and we have to use the slow case.
+ static bool DefineFastAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component,
+ Handle<Object> accessor,
+ PropertyAttributes attributes);
enum InitializeHiddenProperties {
CREATE_NEW_IF_ABSENT,
@@ -2565,7 +2575,7 @@ class FixedArray: public FixedArrayBase {
inline void set(int index, Object* value);
inline bool is_the_hole(int index);
- // Setter that doesn't need write barrier).
+ // Setter that doesn't need write barrier.
inline void set(int index, Smi* value);
// Setter with explicit barrier mode.
inline void set(int index, Object* value, WriteBarrierMode mode);
@@ -2579,12 +2589,6 @@ class FixedArray: public FixedArrayBase {
inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
- // Setters with less debug checks for the GC to use.
- inline void set_unchecked(int index, Smi* value);
- inline void set_null_unchecked(Heap* heap, int index);
- inline void set_unchecked(Heap* heap, int index, Object* value,
- WriteBarrierMode mode);
-
inline Object** GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
@@ -4378,7 +4382,8 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
-class JSGlobalPropertyCell;
+class Cell;
+class PropertyCell;
// TypeFeedbackCells is a fixed array used to hold the association between
// cache cells and AST ids for code generated by the full compiler.
@@ -4395,8 +4400,8 @@ class TypeFeedbackCells: public FixedArray {
inline void SetAstId(int index, TypeFeedbackId id);
// Accessors for global property cells holding the cache values.
- inline JSGlobalPropertyCell* Cell(int index);
- inline void SetCell(int index, JSGlobalPropertyCell* cell);
+ inline Cell* GetCell(int index);
+ inline void SetCell(int index, Cell* cell);
// The object that indicates an uninitialized cache.
static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
@@ -4432,38 +4437,40 @@ class Code: public HeapObject {
// cache state, and arguments count.
typedef uint32_t Flags;
-#define CODE_KIND_LIST(V) \
- V(FUNCTION) \
- V(OPTIMIZED_FUNCTION) \
- V(STUB) \
- V(BUILTIN) \
- V(LOAD_IC) \
- V(KEYED_LOAD_IC) \
- V(CALL_IC) \
- V(KEYED_CALL_IC) \
- V(STORE_IC) \
- V(KEYED_STORE_IC) \
- V(UNARY_OP_IC) \
- V(BINARY_OP_IC) \
- V(COMPARE_IC) \
- V(COMPARE_NIL_IC) \
+#define NON_IC_KIND_LIST(V) \
+ V(FUNCTION) \
+ V(OPTIMIZED_FUNCTION) \
+ V(STUB) \
+ V(BUILTIN) \
+ V(REGEXP)
+
+#define IC_KIND_LIST(V) \
+ V(LOAD_IC) \
+ V(KEYED_LOAD_IC) \
+ V(CALL_IC) \
+ V(KEYED_CALL_IC) \
+ V(STORE_IC) \
+ V(KEYED_STORE_IC) \
+ V(UNARY_OP_IC) \
+ V(BINARY_OP_IC) \
+ V(COMPARE_IC) \
+ V(COMPARE_NIL_IC) \
V(TO_BOOLEAN_IC)
+#define CODE_KIND_LIST(V) \
+ NON_IC_KIND_LIST(V) \
+ IC_KIND_LIST(V)
+
enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
-
- // Pseudo-kinds.
- LAST_CODE_KIND = TO_BOOLEAN_IC,
- REGEXP = BUILTIN,
- FIRST_IC_KIND = LOAD_IC,
- LAST_IC_KIND = TO_BOOLEAN_IC
+ NUMBER_OF_KINDS
};
// No more than 16 kinds. The value is currently encoded in four bits in
// Flags.
- STATIC_ASSERT(LAST_CODE_KIND < 16);
+ STATIC_ASSERT(NUMBER_OF_KINDS <= 16);
static const char* Kind2String(Kind kind);
@@ -4483,10 +4490,6 @@ class Code: public HeapObject {
PROTOTYPE_STUB
};
- enum {
- NUMBER_OF_KINDS = LAST_IC_KIND + 1
- };
-
typedef int ExtraICState;
static const ExtraICState kNoExtraICState = 0;
@@ -4546,7 +4549,6 @@ class Code: public HeapObject {
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
- inline FixedArray* unchecked_deoptimization_data();
inline int relocation_size();
@@ -4662,7 +4664,8 @@ class Code: public HeapObject {
inline byte to_boolean_state();
// [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in.
- byte compare_nil_types();
+ byte compare_nil_state();
+ byte compare_nil_value();
// [has_function_cache]: For kind STUB tells whether there is a function
// cache passed to the stub.
@@ -4680,6 +4683,10 @@ class Code: public HeapObject {
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
+ // Find an object in a stub with a specified map.
+ Object* FindNthObject(int n, Map* match_map);
+ void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
+
// Find the first map in an IC stub.
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
@@ -4692,6 +4699,8 @@ class Code: public HeapObject {
// Find the first name in an IC stub.
Name* FindFirstName();
+ void ReplaceNthCell(int n, Cell* replace_with);
+
class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
@@ -4820,13 +4829,18 @@ class Code: public HeapObject {
};
#undef DECLARE_CODE_AGE_ENUM
- // Code aging
+ // Code aging. Indicates how many full GCs this code has survived without
+ // being entered through the prologue. Used to determine when it is
+ // relatively safe to flush this code object and replace it with the lazy
+ // compilation stub.
static void MakeCodeAgeSequenceYoung(byte* sequence);
void MakeOlder(MarkingParity);
static bool IsYoungSequence(byte* sequence);
bool IsOld();
+ int GetAge();
void PrintDeoptLocation(int bailout_id);
+ bool CanDeoptAt(Address pc);
#ifdef VERIFY_HEAP
void VerifyEmbeddedMapsDependency();
@@ -4966,8 +4980,8 @@ class Code: public HeapObject {
// Code aging
byte* FindCodeAgeSequence();
- static void GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity);
+ static void GetCodeAgeAndParity(Code* code, Age* age,
+ MarkingParity* parity);
static void GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity);
static Code* GetCodeAgeStub(Age age, MarkingParity parity);
@@ -4980,6 +4994,8 @@ class Code: public HeapObject {
};
+class CompilationInfo;
+
// This class describes the layout of dependent codes array of a map. The
// array is partitioned into several groups of dependent codes. Each group
// contains codes with the same dependency on the map. The array has the
@@ -5016,7 +5032,10 @@ class DependentCode: public FixedArray {
// Group of code that depends on elements not being added to objects with
// this map.
kElementsCantBeAddedGroup,
- kGroupCount = kElementsCantBeAddedGroup + 1
+ // Group of code that depends on global property values in property cells
+ // not being changed.
+ kPropertyCellChangedGroup,
+ kGroupCount = kPropertyCellChangedGroup + 1
};
// Array for holding the index of the first code object of each group.
@@ -5033,8 +5052,14 @@ class DependentCode: public FixedArray {
bool Contains(DependencyGroup group, Code* code);
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Code> value);
+ DependencyGroup group,
+ Handle<Object> object);
+ void UpdateToFinishedCode(DependencyGroup group,
+ CompilationInfo* info,
+ Code* code);
+ void RemoveCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info);
+
void DeoptimizeDependentCodeGroup(Isolate* isolate,
DependentCode::DependencyGroup group);
@@ -5042,12 +5067,19 @@ class DependentCode: public FixedArray {
// and the mark compact collector.
inline int number_of_entries(DependencyGroup group);
inline void set_number_of_entries(DependencyGroup group, int value);
+ inline bool is_code_at(int i);
inline Code* code_at(int i);
- inline void set_code_at(int i, Code* value);
- inline Object** code_slot_at(int i);
- inline void clear_code_at(int i);
+ inline CompilationInfo* compilation_info_at(int i);
+ inline void set_object_at(int i, Object* object);
+ inline Object** slot_at(int i);
+ inline Object* object_at(int i);
+ inline void clear_at(int i);
+ inline void copy(int from, int to);
static inline DependentCode* cast(Object* object);
+ static DependentCode* ForObject(Handle<HeapObject> object,
+ DependencyGroup group);
+
private:
// Make room at the end of the given group by moving out the first
// code objects of the subsequent groups.
@@ -5300,8 +5332,6 @@ class Map: public HeapObject {
// [constructor]: points back to the function responsible for this map.
DECL_ACCESSORS(constructor, Object)
- inline JSFunction* unchecked_constructor();
-
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
inline void InitializeDescriptors(DescriptorArray* descriptors);
@@ -5353,8 +5383,7 @@ class Map: public HeapObject {
inline void SetNumberOfProtoTransitions(int value) {
FixedArray* cache = GetPrototypeTransitions();
ASSERT(cache->length() != 0);
- cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
- Smi::FromInt(value));
+ cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value));
}
// Lookup in the map's instance descriptors and fill out the result
@@ -5388,7 +5417,7 @@ class Map: public HeapObject {
set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
}
- inline JSGlobalPropertyCell* RetrieveDescriptorsPointer();
+ inline Cell* RetrieveDescriptorsPointer();
int EnumLength() {
return EnumLengthBits::decode(bit_field3());
@@ -5453,6 +5482,7 @@ class Map: public HeapObject {
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
+ static Handle<Map> Copy(Handle<Map> map);
MUST_USE_RESULT MaybeObject* Copy();
// Returns the next free property index (only valid for FAST MODE).
@@ -5463,13 +5493,6 @@ class Map: public HeapObject {
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
PropertyAttributes filter = NONE);
- // Returns the number of slots allocated for the initial properties
- // backing storage for instances of this map.
- int InitialPropertiesLength() {
- return pre_allocated_property_fields() + unused_property_fields() -
- inobject_properties();
- }
-
// Casting.
static inline Map* cast(Object* obj);
@@ -5554,8 +5577,11 @@ class Map: public HeapObject {
inline bool CanOmitPrototypeChecks();
- inline void AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code);
+ void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info);
+
+ void AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code);
bool IsMapInArrayPrototypeChain();
@@ -5583,11 +5609,11 @@ class Map: public HeapObject {
// transitions are in the form of a map where the keys are prototype objects
// and the values are the maps they are transitioned to.
static const int kMaxCachedPrototypeTransitions = 256;
-
- Map* GetPrototypeTransition(Object* prototype);
-
- MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
- Map* map);
+ static Handle<Map> GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype);
+ static Handle<Map> PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map);
static const int kMaxPreAllocatedPropertyFields = 255;
@@ -5738,7 +5764,7 @@ class Script: public Struct {
DECL_ACCESSORS(name, Object)
// [id]: the script id.
- DECL_ACCESSORS(id, Object)
+ DECL_ACCESSORS(id, Smi)
// [line_offset]: script line offset in resource from where it was extracted.
DECL_ACCESSORS(line_offset, Smi)
@@ -5847,8 +5873,8 @@ class Script: public Struct {
V(Math, min, MathMin) \
V(Math, imul, MathImul)
-
enum BuiltinFunctionId {
+ kArrayCode,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
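
The kArrayCode entry is inserted by hand ahead of the ids that FUNCTIONS_WITH_ID_LIST expands through the DECLARE_FUNCTION_ID X-macro. A compilable toy version of the same pattern, with a trimmed-down function list, showing that the hand-written entry takes slot 0 and the generated ids follow it:

    #include <iostream>

    // Trimmed-down stand-in for v8's FUNCTIONS_WITH_ID_LIST X-macro.
    #define FUNCTIONS_WITH_ID_LIST(V) \
      V(Math, min, MathMin)           \
      V(Math, imul, MathImul)

    #define DECLARE_FUNCTION_ID(ignored1, ignored2, name) k##name,

    enum BuiltinFunctionId {
      kArrayCode,  // the hand-written entry the patch adds, taking slot 0
      FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
    };

    #undef DECLARE_FUNCTION_ID

    int main() {
      // Prints "0 1 2": the macro-generated ids come after kArrayCode.
      std::cout << kArrayCode << " " << kMathMin << " " << kMathImul << "\n";
      return 0;
    }
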
@@ -5917,8 +5943,6 @@ class SharedFunctionInfo: public HeapObject {
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
- inline Code* unchecked_code();
-
// Returns whether this function has been compiled to native code yet.
inline bool is_compiled();
@@ -6128,14 +6152,6 @@ class SharedFunctionInfo: public HeapObject {
// iteration by the debugger).
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
- // Indicates how many full GCs this function has survived with assigned
- // code object. Used to determine when it is relatively safe to flush
- // this code object and replace it with lazy compilation stub.
- // Age is reset when GC notices that the code object is referenced
- // from the stack or compilation cache.
- inline int code_age();
- inline void set_code_age(int age);
-
// Indicates whether optimizations have been disabled for this
// shared function info. If a function is repeatedly optimized or if
// we cannot optimize the function we disable optimization to avoid
@@ -6379,15 +6395,11 @@ class SharedFunctionInfo: public HeapObject {
static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
- static const int kCodeAgeSize = 3;
- static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
-
enum CompilerHints {
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kLiveObjectsMayExist,
- kCodeAgeShift,
- kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
+ kOptimizationDisabled,
kStrictModeFunction,
kExtendedModeFunction,
kUsesArguments,
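
Dropping kCodeAgeShift/kCodeAgeSize compacts the CompilerHints enum, so every hint from kOptimizationDisabled on moves to a lower bit index. A simplified before/after sketch (two hints instead of the full list) making the renumbering concrete:

    #include <cassert>

    // Before the patch (simplified): the age bits were spliced into the enum.
    enum OldHints {
      kOldAllowLazy,                                     // 0
      kOldCodeAgeShift,                                  // 1
      kOldOptimizationDisabled = kOldCodeAgeShift + 3,   // 1 + kCodeAgeSize
    };

    // After the patch the hints are consecutive bit indices again.
    enum NewHints {
      kNewAllowLazy,             // 0
      kNewOptimizationDisabled,  // 1
    };

    int main() {
      assert(kOldOptimizationDisabled == 4);
      assert(kNewOptimizationDisabled == 1);
      return 0;
    }
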
@@ -6565,11 +6577,8 @@ class JSFunction: public JSObject {
// can be shared by instances.
DECL_ACCESSORS(shared, SharedFunctionInfo)
- inline SharedFunctionInfo* unchecked_shared();
-
// [context]: The context for this function.
inline Context* context();
- inline Object* unchecked_context();
inline void set_context(Object* context);
// [code]: The generated code object for this function. Executed
@@ -6581,8 +6590,6 @@ class JSFunction: public JSObject {
inline void set_code_no_write_barrier(Code* code);
inline void ReplaceCode(Code* code);
- inline Code* unchecked_code();
-
// Tells whether this function is builtin.
inline bool IsBuiltin();
@@ -6800,7 +6807,7 @@ class GlobalObject: public JSObject {
DECL_ACCESSORS(global_receiver, JSObject)
// Retrieve the property cell used to store a property.
- JSGlobalPropertyCell* GetPropertyCell(LookupResult* result);
+ PropertyCell* GetPropertyCell(LookupResult* result);
// This is like GetProperty, but is used when you know the lookup won't fail
// by throwing an exception. This is for the debug and builtins global
@@ -6812,7 +6819,7 @@ class GlobalObject: public JSObject {
}
// Ensure that the global object has a cell for the given property name.
- static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
+ static Handle<PropertyCell> EnsurePropertyCell(
Handle<GlobalObject> global,
Handle<Name> name);
// TODO(kmillikin): This function can be eliminated once the stub cache is
@@ -7111,11 +7118,6 @@ class JSRegExp: public JSObject {
// Set implementation data after the object has been prepared.
inline void SetDataAt(int index, Object* value);
- // Used during GC when flushing code or setting age.
- inline Object* DataAtUnchecked(int index);
- inline void SetDataAtUnchecked(int index, Object* value, Heap* heap);
- inline Type TypeTagUnchecked();
-
static int code_index(bool is_ascii) {
if (is_ascii) {
return kIrregexpASCIICodeIndex;
@@ -8539,16 +8541,18 @@ class Oddball: public HeapObject {
};
-class JSGlobalPropertyCell: public HeapObject {
+class Cell: public HeapObject {
public:
// [value]: value of the global property.
DECL_ACCESSORS(value, Object)
// Casting.
- static inline JSGlobalPropertyCell* cast(Object* obj);
+ static inline Cell* cast(Object* obj);
- static inline JSGlobalPropertyCell* FromValueAddress(Address value) {
- return cast(FromAddress(value - kValueOffset));
+ static inline Cell* FromValueAddress(Address value) {
+ Object* result = FromAddress(value - kValueOffset);
+ ASSERT(result->IsCell() || result->IsPropertyCell());
+ return static_cast<Cell*>(result);
}
inline Address ValueAddress() {
@@ -8556,8 +8560,8 @@ class JSGlobalPropertyCell: public HeapObject {
}
// Dispatched behavior.
- DECLARE_PRINTER(JSGlobalPropertyCell)
- DECLARE_VERIFIER(JSGlobalPropertyCell)
+ DECLARE_PRINTER(Cell)
+ DECLARE_VERIFIER(Cell)
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
@@ -8568,7 +8572,50 @@ class JSGlobalPropertyCell: public HeapObject {
kSize> BodyDescriptor;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
+};
+
+
+class PropertyCell: public Cell {
+ public:
+ // [type]: type of the global property.
+ Type* type();
+ void set_type(Type* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // [dependent_code]: dependent code that depends on the type of the global
+ // property.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
+ // Casting.
+ static inline PropertyCell* cast(Object* obj);
+
+ inline Address TypeAddress() {
+ return address() + kTypeOffset;
+ }
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(PropertyCell)
+ DECLARE_VERIFIER(PropertyCell)
+
+ // Layout description.
+ static const int kTypeOffset = kValueOffset + kPointerSize;
+ static const int kDependentCodeOffset = kTypeOffset + kPointerSize;
+ static const int kSize = kDependentCodeOffset + kPointerSize;
+
+ static const int kPointerFieldsBeginOffset = kValueOffset;
+ static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+ typedef FixedBodyDescriptor<kValueOffset,
+ kSize,
+ kSize> BodyDescriptor;
+
+ void AddDependentCompilationInfo(CompilationInfo* info);
+
+ void AddDependentCode(Handle<Code> code);
+
+ private:
+ DECL_ACCESSORS(type_raw, Object)
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
};
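
The new PropertyCell layout chains its offsets off Cell::kValueOffset the same way the rest of objects.h does: each field is one pointer past the previous one, and the subclass's first field starts exactly where the base class ends. A standalone sketch of that offset arithmetic, assuming a one-pointer header purely for illustration:

    #include <cstddef>

    static const int kPointerSize = sizeof(void*);
    static const int kHeaderSize  = kPointerSize;  // assumed, for illustration

    // Cell: header plus one pointer-sized value field.
    static const int kValueOffset = kHeaderSize;
    static const int kCellSize    = kValueOffset + kPointerSize;

    // PropertyCell extends Cell with a type and a dependent-code field.
    static const int kTypeOffset          = kValueOffset + kPointerSize;
    static const int kDependentCodeOffset = kTypeOffset + kPointerSize;
    static const int kPropertyCellSize    = kDependentCodeOffset + kPointerSize;

    static_assert(kTypeOffset == kCellSize,
                  "PropertyCell fields start where Cell ends");

    int main() { return kPropertyCellSize > kCellSize ? 0 : 1; }
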
@@ -8789,8 +8836,8 @@ class JSArrayBuffer: public JSObject {
// [weak_next]: linked list of array buffers.
DECL_ACCESSORS(weak_next, Object)
- // [weak_first_array]: weak linked list of typed arrays.
- DECL_ACCESSORS(weak_first_array, Object)
+ // [weak_first_view]: weak linked list of views.
+ DECL_ACCESSORS(weak_first_view, Object)
// Casting.
static inline JSArrayBuffer* cast(Object* obj);
@@ -8806,8 +8853,8 @@ class JSArrayBuffer: public JSObject {
static const int kByteLengthOffset = kBackingStoreOffset + kPointerSize;
static const int kFlagOffset = kByteLengthOffset + kPointerSize;
static const int kWeakNextOffset = kFlagOffset + kPointerSize;
- static const int kWeakFirstArrayOffset = kWeakNextOffset + kPointerSize;
- static const int kSize = kWeakFirstArrayOffset + kPointerSize;
+ static const int kWeakFirstViewOffset = kWeakNextOffset + kPointerSize;
+ static const int kSize = kWeakFirstViewOffset + kPointerSize;
static const int kSizeWithInternalFields =
kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
@@ -8820,7 +8867,7 @@ class JSArrayBuffer: public JSObject {
};
-class JSTypedArray: public JSObject {
+class JSArrayBufferView: public JSObject {
public:
// [buffer]: ArrayBuffer that this typed array views.
DECL_ACCESSORS(buffer, Object)
@@ -8831,12 +8878,33 @@ class JSTypedArray: public JSObject {
// [byte_length]: length of typed array in bytes.
DECL_ACCESSORS(byte_length, Object)
- // [length]: length of typed array in elements.
- DECL_ACCESSORS(length, Object)
-
// [weak_next]: linked list of typed arrays over the same array buffer.
DECL_ACCESSORS(weak_next, Object)
+ // Casting.
+ static inline JSArrayBufferView* cast(Object* obj);
+
+ DECLARE_VERIFIER(JSArrayBufferView)
+
+ static const int kBufferOffset = JSObject::kHeaderSize;
+ static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
+ static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
+ static const int kWeakNextOffset = kByteLengthOffset + kPointerSize;
+ static const int kViewSize = kWeakNextOffset + kPointerSize;
+
+ protected:
+ void NeuterView();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
+};
+
+
+class JSTypedArray: public JSArrayBufferView {
+ public:
+ // [length]: length of typed array in elements.
+ DECL_ACCESSORS(length, Object)
+
// Neutering. Only neuters this typed array.
void Neuter();
@@ -8850,18 +8918,33 @@ class JSTypedArray: public JSObject {
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
- static const int kBufferOffset = JSObject::kHeaderSize;
- static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
- static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
- static const int kLengthOffset = kByteLengthOffset + kPointerSize;
- static const int kWeakNextOffset = kLengthOffset + kPointerSize;
- static const int kSize = kWeakNextOffset + kPointerSize;
+ static const int kLengthOffset = kViewSize + kPointerSize;
+ static const int kSize = kLengthOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
};
+class JSDataView: public JSArrayBufferView {
+ public:
+ // Neutering. Only neuters this DataView.
+ void Neuter();
+
+ // Casting.
+ static inline JSDataView* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSDataView)
+ DECLARE_VERIFIER(JSDataView)
+
+ static const int kSize = kViewSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
+};
+
+
// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
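
The three classes above form the new view hierarchy: JSArrayBufferView hoists the shared buffer, byte_offset, byte_length and weak_next fields out of JSTypedArray; JSTypedArray keeps only its element count on top of them; and JSDataView adds nothing at all, which is why its kSize is simply kViewSize. A toy C++ mirror of that layout split — plain structs, not V8 heap objects:

    #include <cassert>
    #include <cstdint>

    // Shared view state lives in the base, as in JSArrayBufferView.
    struct ArrayBufferView {
      void*    buffer;
      uint32_t byte_offset;
      uint32_t byte_length;
    };

    struct TypedArray : ArrayBufferView {
      uint32_t length;  // element count, on top of the shared view fields
    };

    struct DataView : ArrayBufferView {
      // no extra fields: the patch's kSize == kViewSize
    };

    int main() {
      // Equal on typical ABIs; the point is that DataView adds no fields.
      assert(sizeof(DataView) == sizeof(ArrayBufferView));
      assert(sizeof(TypedArray) >= sizeof(ArrayBufferView));
      return 0;
    }
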
@@ -9173,7 +9256,7 @@ class AccessorPair: public Struct {
static inline AccessorPair* cast(Object* obj);
- MUST_USE_RESULT MaybeObject* Copy();
+ static Handle<AccessorPair> Copy(Handle<AccessorPair> pair);
Object* get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
@@ -9587,7 +9670,7 @@ class ObjectVisitor BASE_EMBEDDED {
virtual void VisitCodeEntry(Address entry_address);
// Visits a global property cell reference in the instruction stream.
- virtual void VisitGlobalPropertyCell(RelocInfo* rinfo);
+ virtual void VisitCell(RelocInfo* rinfo);
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index b2abc813ab..21ef237107 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -39,7 +39,9 @@ namespace internal {
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- thread_id_ = ThreadId::Current().ToInteger();
+ { ScopedLock lock(thread_id_mutex_);
+ thread_id_ = ThreadId::Current().ToInteger();
+ }
#endif
Isolate::SetIsolateThreadLocals(isolate_, NULL);
DisallowHeapAllocation no_allocation;
@@ -89,8 +91,9 @@ void OptimizingCompilerThread::CompileNext() {
ASSERT(status != OptimizingCompiler::FAILED);
// The function may have already been optimized by OSR. Simply continue.
- // Mark it for installing before queuing so that we can be sure of the write
- // order: marking first and (after being queued) installing code second.
+ // Use a mutex to make sure that functions marked for install
+ // are always also queued.
+ ScopedLock mark_and_queue(install_mutex_);
{ Heap::RelocationLock relocation_lock(isolate_->heap());
AllowHandleDereference ahd;
optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
@@ -106,12 +109,18 @@ void OptimizingCompilerThread::Stop() {
stop_semaphore_->Wait();
if (FLAG_parallel_recompilation_delay != 0) {
- InstallOptimizedFunctions();
// Barrier when loading queue length is not necessary since the write
// happens in CompileNext on the same thread.
- while (NoBarrier_Load(&queue_length_) > 0) {
- CompileNext();
- InstallOptimizedFunctions();
+ while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
+ InstallOptimizedFunctions();
+ } else {
+ OptimizingCompiler* optimizing_compiler;
+ // The optimizing compiler is allocated in the CompilationInfo's zone.
+ while (input_queue_.Dequeue(&optimizing_compiler)) {
+ delete optimizing_compiler->info();
+ }
+ while (output_queue_.Dequeue(&optimizing_compiler)) {
+ delete optimizing_compiler->info();
}
}
@@ -121,18 +130,21 @@ void OptimizingCompilerThread::Stop() {
double percentage = (compile_time * 100) / total_time;
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
+
+ Join();
}
void OptimizingCompilerThread::InstallOptimizedFunctions() {
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- int functions_installed = 0;
- while (!output_queue_.IsEmpty()) {
- OptimizingCompiler* compiler;
- output_queue_.Dequeue(&compiler);
+ OptimizingCompiler* compiler;
+ while (true) {
+ { // Memory barrier to ensure marked functions are queued.
+ ScopedLock marked_and_queued(install_mutex_);
+ if (!output_queue_.Dequeue(&compiler)) return;
+ }
Compiler::InstallOptimizedCode(compiler);
- functions_installed++;
}
}
@@ -151,6 +163,7 @@ void OptimizingCompilerThread::QueueForOptimization(
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
if (!FLAG_parallel_recompilation) return false;
+ ScopedLock lock(thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
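
The patch replaces the old "mark first, then queue" write-ordering comment with an explicit install_mutex_: marking a function for install and enqueueing it happen under one lock in CompileNext, and the dequeue in InstallOptimizedFunctions takes the same lock, so any job the main thread dequeues is guaranteed to already be marked. A minimal standalone sketch of that invariant using C++11 primitives instead of V8's ScopedLock/UnboundQueue:

    #include <cassert>
    #include <mutex>
    #include <queue>
    #include <thread>

    std::mutex install_mutex;
    std::queue<int> output_queue;
    bool marked[1] = {false};

    // Compiler thread: mark and enqueue under one lock, as CompileNext does.
    void CompileNext() {
      std::lock_guard<std::mutex> lock(install_mutex);
      marked[0] = true;
      output_queue.push(0);
    }

    // Main thread: dequeue under the same lock, so a dequeued job is marked.
    bool InstallNext() {
      std::lock_guard<std::mutex> lock(install_mutex);
      if (output_queue.empty()) return false;
      int job = output_queue.front();
      output_queue.pop();
      assert(marked[job]);  // the invariant the mutex enforces
      return true;
    }

    int main() {
      std::thread compiler(CompileNext);
      while (!InstallNext()) { /* spin until the job shows up */ }
      compiler.join();
      return 0;
    }
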
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index 8cb5e2dd59..275ceb40b7 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -31,7 +31,7 @@
#include "atomicops.h"
#include "flags.h"
#include "platform.h"
-#include "unbound-queue.h"
+#include "unbound-queue-inl.h"
namespace v8 {
namespace internal {
@@ -46,10 +46,12 @@ class OptimizingCompilerThread : public Thread {
Thread("OptimizingCompilerThread"),
#ifdef DEBUG
thread_id_(0),
+ thread_id_mutex_(OS::CreateMutex()),
#endif
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
input_queue_semaphore_(OS::CreateSemaphore(0)),
+ install_mutex_(OS::CreateMutex()),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
@@ -81,13 +83,18 @@ class OptimizingCompilerThread : public Thread {
#endif
~OptimizingCompilerThread() {
+ delete install_mutex_;
delete input_queue_semaphore_;
delete stop_semaphore_;
+#ifdef DEBUG
+ delete thread_id_mutex_;
+#endif
}
private:
#ifdef DEBUG
int thread_id_;
+ Mutex* thread_id_mutex_;
#endif
Isolate* isolate_;
@@ -95,6 +102,7 @@ class OptimizingCompilerThread : public Thread {
Semaphore* input_queue_semaphore_;
UnboundQueue<OptimizingCompiler*> input_queue_;
UnboundQueue<OptimizingCompiler*> output_queue_;
+ Mutex* install_mutex_;
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
int64_t time_spent_compiling_;
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index fa24bf703b..b320299748 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -566,7 +566,6 @@ Parser::Parser(CompilationInfo* info)
FunctionLiteral* Parser::ParseProgram() {
- ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -583,11 +582,11 @@ FunctionLiteral* Parser::ParseProgram() {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source, &zone_scope);
+ result = DoParseProgram(info(), source);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source, &zone_scope);
+ result = DoParseProgram(info(), source);
}
if (FLAG_trace_parse && result != NULL) {
@@ -608,8 +607,7 @@ FunctionLiteral* Parser::ParseProgram() {
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
- Handle<String> source,
- ZoneScope* zone_scope) {
+ Handle<String> source) {
ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
@@ -690,15 +688,11 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
// Make sure the target stack is empty.
ASSERT(target_stack_ == NULL);
- // If there was a syntax error we have to get rid of the AST
- // and it is not safe to do so before the scope has been deleted.
- if (result == NULL) zone_scope->DeleteOnExit();
return result;
}
FunctionLiteral* Parser::ParseLazy() {
- ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -713,12 +707,12 @@ FunctionLiteral* Parser::ParseLazy() {
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(&stream, &zone_scope);
+ result = ParseLazy(&stream);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(&stream, &zone_scope);
+ result = ParseLazy(&stream);
}
if (FLAG_trace_parse && result != NULL) {
@@ -730,8 +724,7 @@ FunctionLiteral* Parser::ParseLazy() {
}
-FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
- ZoneScope* zone_scope) {
+FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
ASSERT(top_scope_ == NULL);
@@ -779,10 +772,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
// Make sure the target stack is empty.
ASSERT(target_stack_ == NULL);
- // If there was a stack overflow we have to get rid of AST and it is
- // not safe to do before scope has been deleted.
if (result == NULL) {
- zone_scope->DeleteOnExit();
if (stack_overflow_) isolate()->StackOverflow();
} else {
Handle<String> inferred_name(shared_info->inferred_name());
@@ -889,8 +879,8 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// Still processing directive prologue?
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
(literal = e_stat->expression()->AsLiteral()) != NULL &&
- literal->handle()->IsString()) {
- Handle<String> directive = Handle<String>::cast(literal->handle());
+ literal->value()->IsString()) {
+ Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
if (top_scope_->is_classic_mode() &&
@@ -2624,11 +2614,13 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
}
-bool Parser::CheckInOrOf(ForEachStatement::VisitMode* visit_mode) {
+bool Parser::CheckInOrOf(bool accept_OF,
+ ForEachStatement::VisitMode* visit_mode) {
if (Check(Token::IN)) {
*visit_mode = ForEachStatement::ENUMERATE;
return true;
- } else if (allow_for_of() && CheckContextualKeyword(CStrVector("of"))) {
+ } else if (allow_for_of() && accept_OF &&
+ CheckContextualKeyword(CStrVector("of"))) {
*visit_mode = ForEachStatement::ITERATE;
return true;
}
@@ -2726,11 +2718,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
Handle<String> name;
+ VariableDeclarationProperties decl_props = kHasNoInitializers;
Block* variable_statement =
- ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
+ CHECK_OK);
+ bool accept_OF = decl_props == kHasNoInitializers;
ForEachStatement::VisitMode mode;
- if (!name.is_null() && CheckInOrOf(&mode)) {
+ if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) {
Interface* interface =
is_const ? Interface::NewConst() : Interface::NewValue();
ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
@@ -2762,9 +2757,10 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
CHECK_OK);
bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
+ bool accept_OF = decl_props == kHasNoInitializers;
ForEachStatement::VisitMode mode;
- if (accept_IN && CheckInOrOf(&mode)) {
+ if (accept_IN && CheckInOrOf(accept_OF, &mode)) {
// Rewrite a for-in statement of the form
//
// for (let x in e) b
@@ -2820,8 +2816,9 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
ForEachStatement::VisitMode mode;
+ bool accept_OF = expression->AsVariableProxy();
- if (CheckInOrOf(&mode)) {
+ if (CheckInOrOf(accept_OF, &mode)) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report
@@ -3060,10 +3057,10 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
// Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
- double y_val = y->AsLiteral()->handle()->Number();
+ if (x && x->AsLiteral() && x->AsLiteral()->value()->IsNumber() &&
+ y && y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
+ double x_val = x->AsLiteral()->value()->Number();
+ double y_val = y->AsLiteral()->value()->Number();
switch (op) {
case Token::ADD:
@@ -3162,7 +3159,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Expression* expression = ParseUnaryExpression(CHECK_OK);
if (expression != NULL && (expression->AsLiteral() != NULL)) {
- Handle<Object> literal = expression->AsLiteral()->handle();
+ Handle<Object> literal = expression->AsLiteral()->value();
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->BooleanValue();
@@ -3727,18 +3724,6 @@ bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
}
-bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
- Expression* value) {
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) return false;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) return false;
- return true;
-}
-
-
Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
Factory* factory = Isolate::Current()->factory();
ASSERT(IsCompileTimeValue(expression));
@@ -3776,7 +3761,7 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
if (expression->AsLiteral() != NULL) {
- return expression->AsLiteral()->handle();
+ return expression->AsLiteral()->value();
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(expression);
@@ -3889,7 +3874,7 @@ void Parser::BuildObjectLiteralConstantProperties(
// Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
// value for COMPUTED properties, the real value is filled in at
// runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->handle();
+ Handle<Object> key = property->key()->value();
Handle<Object> value = GetBoilerplateValue(property->value());
// Ensure objects that may, at any point in time, contain fields with double
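
The accept_OF threading above encodes the rule that "of" is only a valid loop keyword when the declaration part carries no initializer — for (var x of e) parses, for (var x = 1 of e) must not — and, in the plain-expression form, only when the left-hand side is a bare variable proxy. A toy predicate capturing the declaration rule:

    #include <cassert>
    #include <string>

    enum DeclProps { kHasNoInitializers, kHasInitializers };

    // Mirrors the parser's new check: 'in' is always accepted, 'of' only
    // when the caller says the declaration had no initializers.
    bool CheckInOrOf(bool accept_OF, const std::string& token) {
      if (token == "in") return true;
      if (token == "of") return accept_OF;
      return false;
    }

    int main() {
      DeclProps no_init = kHasNoInitializers;
      DeclProps with_init = kHasInitializers;
      assert(CheckInOrOf(no_init == kHasNoInitializers, "of"));     // var x of e
      assert(!CheckInOrOf(with_init == kHasNoInitializers, "of"));  // var x = 1 of e
      assert(CheckInOrOf(false, "in"));  // 'in' is unaffected by the flag
      return 0;
    }
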
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index b7e0700009..c3a7edfd9c 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -562,8 +562,7 @@ class Parser BASE_EMBEDDED {
};
FunctionLiteral* ParseLazy();
- FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
- ZoneScope* zone_scope);
+ FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
Isolate* isolate() { return isolate_; }
Zone* zone() const { return zone_; }
@@ -571,8 +570,7 @@ class Parser BASE_EMBEDDED {
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(CompilationInfo* info,
- Handle<String> source,
- ZoneScope* zone_scope);
+ Handle<String> source);
// Report syntax error
void ReportUnexpectedToken(Token::Value token);
@@ -729,7 +727,7 @@ class Parser BASE_EMBEDDED {
bool is_generator() const { return current_function_state_->is_generator(); }
- bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode);
+ bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
bool peek_any_identifier();
@@ -888,8 +886,6 @@ class CompileTimeValue: public AllStatic {
static bool IsCompileTimeValue(Expression* expression);
- static bool ArrayLiteralElementNeedsInitialization(Expression* value);
-
// Get the value as a compile time value.
static Handle<FixedArray> GetValue(Expression* expression);
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 35427d4d19..bda9f923fd 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -79,12 +79,6 @@ int OS::ActivationFrameAlignment() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 4305ccb288..e2c2c42de5 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -85,12 +85,6 @@ void OS::PostSetUp() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // FreeBSD runs on anything.
}
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 22f2245f48..2c6a36c37e 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -295,7 +295,7 @@ bool OS::MipsCpuHasFeature(CpuFeature feature) {
int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
// runtime system.
return 8;
@@ -308,19 +308,6 @@ int OS::ActivationFrameAlignment() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
- (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
- // Only use on ARM or MIPS hardware.
- MemoryBarrier();
-#else
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
-#endif
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index eea1726405..21e9c7f516 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -295,12 +295,6 @@ int OS::ActivationFrameAlignment() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- OSMemoryBarrier();
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -589,7 +583,7 @@ static void InitializeTlsBaseOffset() {
if (kernel_version_major < 11) {
// 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
// same offsets.
-#if defined(V8_HOST_ARCH_IA32)
+#if V8_HOST_ARCH_IA32
kMacTlsBaseOffset = 0x48;
#else
kMacTlsBaseOffset = 0x60;
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 0a7cc80f3d..b722e31e0c 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -79,7 +79,7 @@ static void* GetRandomMmapAddr() {
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
uint64_t rnd1 = V8::RandomPrivate(isolate);
uint64_t rnd2 = V8::RandomPrivate(isolate);
uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
@@ -117,13 +117,6 @@ int OS::ActivationFrameAlignment() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 054d5b5a50..e72a5d9bc1 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -105,7 +105,7 @@ void* OS::GetRandomMmapAddr() {
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
uint64_t rnd1 = V8::RandomPrivate(isolate);
uint64_t rnd2 = V8::RandomPrivate(isolate);
uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
@@ -115,26 +115,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
-# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
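
The consolidated 32-bit branch keeps the mask-then-offset hint computation and drops the Solaris special case (which had moved the hint into the third quarter of the address space). A small check, using the patch's constants, that the result always lands page-aligned in [0x20000000, 0x60000000):

    #include <cassert>
    #include <cstdint>

    // The 32-bit hint from the patch: mask to a page-aligned value below
    // 0x40000000, then shift the whole range up to start at 0x20000000.
    uint32_t MmapHint(uint32_t raw) {
      raw &= 0x3ffff000;  // keep bits 12..29: page aligned, < 0x40000000
      raw += 0x20000000;  // final range: [0x20000000, 0x60000000)
      return raw;
    }

    int main() {
      for (uint64_t probe = 0; probe <= 0xffffffffULL; probe += 0x01234567) {
        uint32_t addr = MmapHint(static_cast<uint32_t>(probe));
        assert(addr >= 0x20000000u && addr < 0x60000000u);
        assert((addr & 0xfff) == 0);  // aligned to 4 KB pages
      }
      return 0;
    }
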
@@ -336,7 +321,7 @@ int OS::VSNPrintF(Vector<char> str,
}
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
@@ -359,7 +344,7 @@ void OS::MemMove(void* dest, const void* src, size_t size) {
void POSIXPostSetUp() {
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 5fb28c84a6..4b0094fb22 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -111,12 +111,6 @@ int OS::ActivationFrameAlignment() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/deps/v8/src/platform-tls-mac.h b/deps/v8/src/platform-tls-mac.h
index 728524e80b..d1c5907191 100644
--- a/deps/v8/src/platform-tls-mac.h
+++ b/deps/v8/src/platform-tls-mac.h
@@ -33,7 +33,7 @@
namespace v8 {
namespace internal {
-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
#define V8_FAST_TLS_SUPPORTED 1
@@ -43,7 +43,7 @@ INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
intptr_t result;
-#if defined(V8_HOST_ARCH_IA32)
+#if V8_HOST_ARCH_IA32
asm("movl %%gs:(%1,%2,4), %0;"
:"=r"(result) // Output must be a writable register.
:"r"(kMacTlsBaseOffset), "r"(index));
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 6795844760..191376099c 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -147,7 +147,7 @@ double ceiling(double x) {
static Mutex* limit_mutex = NULL;
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
@@ -580,7 +580,7 @@ void OS::PostSetUp() {
// Math functions depend on CPU features therefore they are initialized after
// CPU.
MathSetup();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
@@ -1485,12 +1485,6 @@ int OS::ActivationFrameAlignment() {
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 86706fe3c3..24d21cb3ae 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -100,7 +100,6 @@ int random();
#endif // WIN32
-#include "atomicops.h"
#include "lazy-instance.h"
#include "platform-tls.h"
#include "utils.h"
@@ -330,8 +329,6 @@ class OS {
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
- static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
-
#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
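
OS::ReleaseStore is deleted from every platform file above; its job — a store that publishes all earlier writes to any thread that later observes it — is what C++11 expresses as a release store paired with an acquire load. A minimal sketch of the equivalent semantics (illustrating what was removed, not the code V8 switched to):

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<int> data_ready(0);
    int payload = 0;

    // All writes before the release store become visible to a thread that
    // acquires the flag -- the guarantee ReleaseStore used to provide.
    void Producer() {
      payload = 42;                                    // plain write
      data_ready.store(1, std::memory_order_release);  // release barrier
    }

    void Consumer() {
      while (data_ready.load(std::memory_order_acquire) == 0) { /* spin */ }
      assert(payload == 42);  // guaranteed by the release/acquire pairing
    }

    int main() {
      std::thread p(Producer), c(Consumer);
      p.join();
      c.join();
      return 0;
    }
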
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 828177aee0..3268e3c508 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -659,10 +659,9 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
}
-bool PreParser::CheckInOrOf() {
+bool PreParser::CheckInOrOf(bool accept_OF) {
if (peek() == i::Token::IN ||
- (allow_for_of() &&
- peek() == i::Token::IDENTIFIER &&
+ (allow_for_of() && accept_OF && peek() == i::Token::IDENTIFIER &&
scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) {
Next();
return true;
@@ -685,9 +684,10 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
VariableDeclarationProperties decl_props = kHasNoInitializers;
ParseVariableDeclarations(
kForStatement, &decl_props, &decl_count, CHECK_OK);
- bool accept_IN = decl_count == 1 &&
- !(is_let && decl_props == kHasInitializers);
- if (accept_IN && CheckInOrOf()) {
+ bool has_initializers = decl_props == kHasInitializers;
+ bool accept_IN = decl_count == 1 && !(is_let && has_initializers);
+ bool accept_OF = !has_initializers;
+ if (accept_IN && CheckInOrOf(accept_OF)) {
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
@@ -695,8 +695,8 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
return Statement::Default();
}
} else {
- ParseExpression(false, CHECK_OK);
- if (CheckInOrOf()) {
+ Expression lhs = ParseExpression(false, CHECK_OK);
+ if (CheckInOrOf(lhs.IsIdentifier())) {
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 786316ed50..41907d12eb 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -658,7 +658,7 @@ class PreParser {
}
void ExpectSemicolon(bool* ok);
- bool CheckInOrOf();
+ bool CheckInOrOf(bool accept_OF);
static int Precedence(i::Token::Value tok, bool accept_IN);
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 23cad95692..1824efa7f5 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -315,7 +315,7 @@ void PrettyPrinter::VisitConditional(Conditional* node) {
void PrettyPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->handle(), true);
+ PrintLiteral(node->value(), true);
}
@@ -379,11 +379,11 @@ void PrettyPrinter::VisitThrow(Throw* node) {
void PrettyPrinter::VisitProperty(Property* node) {
Expression* key = node->key();
Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->handle()->IsInternalizedString()) {
+ if (literal != NULL && literal->value()->IsInternalizedString()) {
Print("(");
Visit(node->obj());
Print(").");
- PrintLiteral(literal->handle(), false);
+ PrintLiteral(literal->value(), false);
} else {
Visit(node->obj());
Print("[");
@@ -999,7 +999,7 @@ void AstPrinter::VisitConditional(Conditional* node) {
// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitLiteral(Literal* node) {
- PrintLiteralIndented("LITERAL", node->handle(), true);
+ PrintLiteralIndented("LITERAL", node->value(), true);
}
@@ -1102,8 +1102,8 @@ void AstPrinter::VisitProperty(Property* node) {
IndentedScope indent(this, "PROPERTY");
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
- if (literal != NULL && literal->handle()->IsInternalizedString()) {
- PrintLiteralIndented("NAME", literal->handle(), false);
+ if (literal != NULL && literal->value()->IsInternalizedString()) {
+ PrintLiteralIndented("NAME", literal->value(), false);
} else {
PrintIndentedVisit("KEY", node->key());
}
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index d6e8a3775b..20c1aec731 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -44,17 +44,19 @@ const char* StringsStorage::GetFunctionName(const char* name) {
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
const char* name,
+ int security_token_id,
+ const char* name_prefix,
const char* resource_name,
- int line_number,
- int security_token_id)
+ int line_number)
: tag_(tag),
+ builtin_id_(Builtins::builtin_count),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
line_number_(line_number),
shared_id_(0),
+ script_id_(v8::Script::kNoScriptId),
security_token_id_(security_token_id),
no_frame_ranges_(NULL) {
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 5418979cc5..78b05c57e4 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -30,11 +30,12 @@
#include "profile-generator-inl.h"
#include "compiler.h"
+#include "debug.h"
+#include "sampler.h"
#include "global-handles.h"
#include "scopeinfo.h"
#include "unicode.h"
#include "zone-inl.h"
-#include "debug.h"
namespace v8 {
namespace internal {
@@ -79,7 +80,7 @@ void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate,
v8::Persistent<v8::Value>* handle,
void* parameter) {
reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
- Utils::OpenHandle(**handle).location());
+ Utils::OpenPersistent(handle).location());
handle->Dispose(isolate);
}
@@ -183,7 +184,9 @@ size_t StringsStorage::GetUsedMemorySize() const {
return size;
}
+
const char* const CodeEntry::kEmptyNamePrefix = "";
+const char* const CodeEntry::kEmptyResourceName = "";
CodeEntry::~CodeEntry() {
@@ -233,6 +236,12 @@ bool CodeEntry::IsSameAs(CodeEntry* entry) const {
}
+void CodeEntry::SetBuiltinId(Builtins::Name id) {
+ tag_ = Logger::BUILTIN_TAG;
+ builtin_id_ = id;
+}
+
+
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -265,12 +274,13 @@ double ProfileNode::GetTotalMillis() const {
void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s [%d] #%d",
+ OS::Print("%5u %5u %*c %s%s [%d] #%d %d",
total_ticks_, self_ticks_,
indent, ' ',
entry_->name_prefix(),
entry_->name(),
entry_->security_token_id(),
+ entry_->script_id(),
id());
if (entry_->resource_name()[0] != '\0')
OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
@@ -296,12 +306,7 @@ class DeleteNodesCallback {
ProfileTree::ProfileTree()
- : root_entry_(Logger::FUNCTION_TAG,
- "",
- "(root)",
- "",
- 0,
- TokenEnumerator::kNoSecurityToken),
+ : root_entry_(Logger::FUNCTION_TAG, "(root)"),
next_node_id_(1),
root_(new ProfileNode(this, &root_entry_)) {
}
@@ -790,61 +795,6 @@ List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
}
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- Name* name,
- String* resource_name,
- int line_number) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- GetName(resource_name),
- line_number,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- Name* name) {
- CodeEntry* entry = new CodeEntry(tag,
- name_prefix,
- GetName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count) {
- CodeEntry* entry = new CodeEntry(tag,
- "args_count: ",
- GetName(args_count),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
void CpuProfilesCollection::AddPathToCurrentProfiles(
const Vector<CodeEntry*>& path) {
// As starting / stopping profiles is rare relatively to this
@@ -858,6 +808,24 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
}
+CodeEntry* CpuProfilesCollection::NewCodeEntry(
+ Logger::LogEventsAndTags tag,
+ const char* name,
+ int security_token_id,
+ const char* name_prefix,
+ const char* resource_name,
+ int line_number) {
+ CodeEntry* code_entry = new CodeEntry(tag,
+ name,
+ security_token_id,
+ name_prefix,
+ resource_name,
+ line_number);
+ code_entries_.Add(code_entry);
+ return code_entry;
+}
+
+
void SampleRateCalculator::Tick() {
if (--wall_time_query_countdown_ == 0)
UpdateMeasurements(OS::TimeCurrentMillis());
@@ -887,6 +855,8 @@ const char* const ProfileGenerator::kProgramEntryName =
"(program)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
"(garbage collector)";
+const char* const ProfileGenerator::kUnresolvedFunctionName =
+ "(unresolved function)";
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
@@ -895,7 +865,10 @@ ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
gc_entry_(
profiles->NewCodeEntry(Logger::BUILTIN_TAG,
- kGarbageCollectorEntryName)) {
+ kGarbageCollectorEntryName)),
+ unresolved_entry_(
+ profiles->NewCodeEntry(Logger::FUNCTION_TAG,
+ kUnresolvedFunctionName)) {
}
@@ -907,33 +880,45 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
CodeEntry** entry = entries.start();
memset(entry, 0, entries.length() * sizeof(*entry));
if (sample.pc != NULL) {
- Address start;
- CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
- // If pc is in the function code before it set up stack frame or after the
- // frame was destroyed SafeStackTraceFrameIterator incorrectly thinks that
- // ebp contains return address of the current function and skips caller's
- // frame. Check for this case and just skip such samples.
- if (pc_entry) {
- List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
- if (ranges) {
- Code* code = Code::cast(HeapObject::FromAddress(start));
- int pc_offset = static_cast<int>(sample.pc - code->instruction_start());
- for (int i = 0; i < ranges->length(); i++) {
- OffsetRange& range = ranges->at(i);
- if (range.from <= pc_offset && pc_offset < range.to) {
- return;
- }
- }
- }
- }
- *entry++ = pc_entry;
-
if (sample.has_external_callback) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- *(entries.start()) = NULL;
*entry++ = code_map_.FindEntry(sample.external_callback);
+ } else {
+ Address start;
+ CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
+ // If pc is in the function code before it set up stack frame or after the
+ // frame was destroyed SafeStackFrameIterator incorrectly thinks that
+ // ebp contains return address of the current function and skips caller's
+ // frame. Check for this case and just skip such samples.
+ if (pc_entry) {
+ List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
+ if (ranges) {
+ Code* code = Code::cast(HeapObject::FromAddress(start));
+ int pc_offset = static_cast<int>(
+ sample.pc - code->instruction_start());
+ for (int i = 0; i < ranges->length(); i++) {
+ OffsetRange& range = ranges->at(i);
+ if (range.from <= pc_offset && pc_offset < range.to) {
+ return;
+ }
+ }
+ }
+ *entry++ = pc_entry;
+
+ if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
+ pc_entry->builtin_id() == Builtins::kFunctionApply) {
+ // When current function is FunctionCall or FunctionApply builtin the
+ // top frame is either frame of the calling JS function or internal
+ // frame. In the latter case we know the caller for sure but in the
+ // former case we don't so we simply replace the frame with
+ // 'unresolved' entry.
+ if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
+ *entry++ = unresolved_entry_;
+ }
+ }
+ }
}
for (const Address* stack_pos = sample.stack,
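
The reordered RecordTickSample handles the external-callback case first and only then consults the pc entry's no-frame ranges, dropping samples whose pc offset falls where the frame is not yet (or no longer) set up. A toy version of that range check:

    #include <cassert>
    #include <vector>

    struct OffsetRange { int from, to; };

    // Drop a sample whose pc offset falls inside a range where the frame
    // is not set up, as the rewritten RecordTickSample does.
    bool ShouldDropSample(int pc_offset, const std::vector<OffsetRange>& ranges) {
      for (size_t i = 0; i < ranges.size(); ++i) {
        if (ranges[i].from <= pc_offset && pc_offset < ranges[i].to) return true;
      }
      return false;
    }

    int main() {
      std::vector<OffsetRange> ranges;
      ranges.push_back(OffsetRange{0, 8});  // prologue: frame not built yet
      assert(ShouldDropSample(4, ranges));
      assert(!ShouldDropSample(16, ranges));
      return 0;
    }
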
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 7a5e1f2fc5..411cbdbab2 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -97,11 +97,11 @@ class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
const char* name,
- const char* resource_name,
- int line_number,
- int security_token_id));
+ int security_token_id = TokenEnumerator::kNoSecurityToken,
+ const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+ const char* resource_name = CodeEntry::kEmptyResourceName,
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo));
~CodeEntry();
INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
@@ -111,6 +111,8 @@ class CodeEntry {
INLINE(const char* resource_name() const) { return resource_name_; }
INLINE(int line_number() const) { return line_number_; }
INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
+ INLINE(int script_id() const) { return script_id_; }
+ INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
INLINE(int security_token_id() const) { return security_token_id_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
@@ -120,19 +122,25 @@ class CodeEntry {
no_frame_ranges_ = ranges;
}
+ void SetBuiltinId(Builtins::Name id);
+ Builtins::Name builtin_id() const { return builtin_id_; }
+
void CopyData(const CodeEntry& source);
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
static const char* const kEmptyNamePrefix;
+ static const char* const kEmptyResourceName;
private:
- Logger::LogEventsAndTags tag_;
+ Logger::LogEventsAndTags tag_ : 8;
+ Builtins::Name builtin_id_ : 8;
const char* name_prefix_;
const char* name_;
const char* resource_name_;
int line_number_;
int shared_id_;
+ int script_id_;
int security_token_id_;
List<OffsetRange>* no_frame_ranges_;
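
Packing tag_ and the new builtin_id_ into 8-bit bitfields lets CodeEntry gain a field without growing that part of the object. A standalone sketch of the same trick, with toy enums standing in for Logger::LogEventsAndTags and Builtins::Name:

    #include <cassert>

    enum Tag { kFunctionTag, kBuiltinTag };
    enum BuiltinId { kFunctionCall, kFunctionApply, kBuiltinCount };

    // Two enums squeezed into 8-bit bitfields share a single word.
    struct Packed {
      Tag tag_ : 8;
      BuiltinId builtin_id_ : 8;
    };

    int main() {
      Packed p;
      p.tag_ = kBuiltinTag;
      p.builtin_id_ = kFunctionApply;
      assert(p.tag_ == kBuiltinTag && p.builtin_id_ == kFunctionApply);
      assert(sizeof(Packed) <= sizeof(int));  // both fields fit in one word
      return 0;
    }
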
@@ -317,18 +325,24 @@ class CpuProfilesCollection {
const char* GetName(int args_count) {
return function_and_resource_names_.GetName(args_count);
}
+ const char* GetFunctionName(Name* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+ const char* GetFunctionName(const char* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
CpuProfile* GetProfile(int security_token_id, unsigned uid);
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- Name* name, String* resource_name, int line_number);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix, Name* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
- CodeEntry* NewCodeEntry(int security_token_id);
+ CodeEntry* NewCodeEntry(
+ Logger::LogEventsAndTags tag,
+ const char* name,
+ int security_token_id = TokenEnumerator::kNoSecurityToken,
+ const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+ const char* resource_name = CodeEntry::kEmptyResourceName,
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
@@ -337,12 +351,6 @@ class CpuProfilesCollection {
static const int kMaxSimultaneousProfiles = 100;
private:
- const char* GetFunctionName(Name* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
int GetProfileIndex(unsigned uid);
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
@@ -421,6 +429,9 @@ class ProfileGenerator {
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
static const char* const kGarbageCollectorEntryName;
+ // Used to represent frames for which we have no reliable way to
+ // detect the function.
+ static const char* const kUnresolvedFunctionName;
private:
INLINE(CodeEntry* EntryForVMState(StateTag tag));
@@ -429,6 +440,7 @@ class ProfileGenerator {
CodeMap code_map_;
CodeEntry* program_entry_;
CodeEntry* gc_entry_;
+ CodeEntry* unresolved_entry_;
SampleRateCalculator sample_rate_calc_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
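
Above, four NewCodeEntry overloads collapse into one signature whose trailing parameters default to the "empty" values, so the common tag-plus-name call sites keep compiling unchanged. A compilable toy version of that consolidation — the Entry type is hypothetical, and std::string stands in for V8's interned char pointers:

    #include <iostream>
    #include <string>

    struct Entry {
      std::string name, prefix, resource;
      int token, line;
    };

    // One factory with trailing defaults replaces a family of overloads.
    Entry NewCodeEntry(const std::string& name,
                       int security_token_id = -1,  // "no token" sentinel
                       const std::string& name_prefix = "",
                       const std::string& resource_name = "",
                       int line_number = 0) {
      return Entry{name, name_prefix, resource_name, security_token_id,
                   line_number};
    }

    int main() {
      Entry a = NewCodeEntry("(root)");                   // shortest form
      Entry b = NewCodeEntry("f", 7, "get ", "a.js", 3);  // fully specified
      std::cout << a.name << " " << b.resource << ":" << b.line << "\n";
      return 0;
    }
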
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 669b05dca0..b0d10e1270 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -110,6 +110,10 @@ class Representation {
(!IsDouble() && !other.IsDouble());
}
+ bool IsCompatibleForStore(const Representation& other) const {
+ return Equals(other);
+ }
+
bool is_more_general_than(const Representation& other) const {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index f853fc8ba0..5213ee6077 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -112,7 +112,7 @@ class ConstantFunctionDescriptor: public Descriptor {
JSFunction* function,
PropertyAttributes attributes)
: Descriptor(key, function, attributes, CONSTANT_FUNCTION,
- Representation::Tagged()) {}
+ Representation::HeapObject()) {}
};
@@ -351,7 +351,7 @@ class LookupResult BASE_EMBEDDED {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
if (holder()->IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
}
return value;
}
@@ -392,6 +392,11 @@ class LookupResult BASE_EMBEDDED {
return IsTransition() && GetTransitionDetails(map).type() == FIELD;
}
+ bool IsTransitionToConstantFunction(Map* map) {
+ return IsTransition() &&
+ GetTransitionDetails(map).type() == CONSTANT_FUNCTION;
+ }
+
Map* GetTransitionMap() {
ASSERT(IsTransition());
return Map::cast(GetValue());
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index c4b79b11b5..bd02a69042 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -80,11 +80,17 @@ STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+// Maximum size in bytes of generated code for a function to allow OSR.
+static const int kOSRCodeSizeAllowanceBase =
+ 100 * FullCodeGenerator::kCodeSizeMultiplier;
+
+static const int kOSRCodeSizeAllowancePerTick =
+ 3 * FullCodeGenerator::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
- 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
+ 5 * FullCodeGenerator::kCodeSizeMultiplier;
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
@@ -100,14 +106,13 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
}
-static void GetICCounts(JSFunction* function,
+static void GetICCounts(Code* shared_code,
int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
*ic_with_type_info_count = 0;
- Object* raw_info =
- function->shared()->code()->type_feedback_info();
+ Object* raw_info = shared_code->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
*ic_with_type_info_count = info->ic_with_type_info_count();
@@ -128,7 +133,7 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF(" for recompilation, reason: %s", reason);
if (FLAG_type_info_threshold > 0) {
int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
+ GetICCounts(function->shared()->code(), &typeinfo, &total, &percentage);
PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
}
PrintF("]\n");
@@ -148,9 +153,6 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation() ||
- function->IsOptimized());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
@@ -225,6 +227,8 @@ void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
+ if (isolate_->DebuggerHasBreakPoints()) return;
+
if (FLAG_parallel_recompilation) {
// Take this as opportunity to process the optimizing compiler thread's
// output queue so that it does not unnecessarily keep objects alive.
@@ -268,18 +272,35 @@ void RuntimeProfiler::OptimizeNow() {
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsInRecompileQueue()) continue;
- // Attempt OSR if we are still running unoptimized code even though the
- // the function has long been marked or even already been optimized.
- if (!frame->is_optimized() &&
+ if (FLAG_always_osr &&
+ shared_code->allow_osr_at_loop_nesting_level() == 0) {
+ // Testing mode: always try an OSR compile for every function.
+ for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
+ // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
+ shared_code->set_allow_osr_at_loop_nesting_level(i);
+ AttemptOnStackReplacement(function);
+ }
+ // Fall through and do a normal optimized compile as well.
+ } else if (!frame->is_optimized() &&
(function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForParallelRecompilation() ||
function->IsOptimized())) {
- int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting < Code::kMaxLoopNestingMarker) {
- int new_nesting = nesting + 1;
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
- AttemptOnStackReplacement(function);
+ // Attempt OSR if we are still running unoptimized code even though
+ // the function has long been marked or even already been optimized.
+ int ticks = shared_code->profiler_ticks();
+ int allowance = kOSRCodeSizeAllowanceBase +
+ ticks * kOSRCodeSizeAllowancePerTick;
+ if (shared_code->CodeSize() > allowance) {
+ if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
+ } else {
+ int nesting = shared_code->allow_osr_at_loop_nesting_level();
+ if (nesting < Code::kMaxLoopNestingMarker) {
+ int new_nesting = nesting + 1;
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ AttemptOnStackReplacement(function);
+ }
}
+ continue;
}
// Only record top-level code on top of the execution stack and
@@ -313,7 +334,7 @@ void RuntimeProfiler::OptimizeNow() {
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
+ GetICCounts(shared_code, &typeinfo, &total, &percentage);
if (percentage >= FLAG_type_info_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 1bf9aa8785..46da38155f 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -75,6 +75,8 @@ class RuntimeProfiler {
void AddSample(JSFunction* function, int weight);
+ bool CodeSizeOKForOSR(Code* shared_code);
+
Isolate* isolate_;
int sampler_threshold_;
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index eccf6ea4c8..e3ee6d56c3 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -38,6 +38,7 @@
#include "compilation-cache.h"
#include "compiler.h"
#include "cpu.h"
+#include "cpu-profiler.h"
#include "dateparser-inl.h"
#include "debug.h"
#include "deoptimizer.h"
@@ -650,23 +651,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
}
-static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
- Persistent<Value>* object,
- void* data) {
- Isolate* isolate = reinterpret_cast<Isolate*>(external_isolate);
- HandleScope scope(isolate);
- Handle<Object> internal_object = Utils::OpenHandle(**object);
- Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(*internal_object));
+void Runtime::FreeArrayBuffer(Isolate* isolate,
+ JSArrayBuffer* phantom_array_buffer) {
+ if (phantom_array_buffer->is_external()) return;
- if (!array_buffer->is_external()) {
- size_t allocated_length = NumberToSize(
- isolate, array_buffer->byte_length());
- isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<intptr_t>(allocated_length));
- CHECK(V8::ArrayBufferAllocator() != NULL);
- V8::ArrayBufferAllocator()->Free(data);
- }
- object->Dispose(external_isolate);
+ size_t allocated_length = NumberToSize(
+ isolate, phantom_array_buffer->byte_length());
+
+ isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<intptr_t>(allocated_length));
+ CHECK(V8::ArrayBufferAllocator() != NULL);
+ V8::ArrayBufferAllocator()->Free(phantom_array_buffer->backing_store());
}
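// Sketch of the accounting contract above, with a made-up length: external
// memory reported to the heap at allocation must be reported back with the
// opposite sign on free, or the external-memory pressure estimate drifts.
const size_t kLen = 1024;
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
    static_cast<intptr_t>(kLen));   // on allocation
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
    -static_cast<intptr_t>(kLen));  // on free, as FreeArrayBuffer does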
@@ -691,7 +686,7 @@ void Runtime::SetupArrayBuffer(Isolate* isolate,
array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
isolate->heap()->set_array_buffers_list(*array_buffer);
- array_buffer->set_weak_first_array(Smi::FromInt(0));
+ array_buffer->set_weak_first_view(isolate->heap()->undefined_value());
}
@@ -711,11 +706,6 @@ bool Runtime::SetupArrayBufferAllocatingData(
SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
- v8::Isolate* external_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::Persistent<v8::Value> weak_handle(
- external_isolate, v8::Utils::ToLocal(Handle<Object>::cast(array_buffer)));
- weak_handle.MakeWeak(external_isolate, data, ArrayBufferWeakCallback);
- weak_handle.MarkIndependent(external_isolate);
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length);
return true;
@@ -861,8 +851,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
- holder->set_weak_next(buffer->weak_first_array());
- buffer->set_weak_first_array(*holder);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
Handle<ExternalArray> elements =
isolate->factory()->NewExternalArray(
@@ -1014,6 +1004,223 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_length, 3);
+
+ holder->set_buffer(*buffer);
+ ASSERT(byte_offset->IsNumber());
+ ASSERT(
+ NumberToSize(isolate, buffer->byte_length()) >=
+ NumberToSize(isolate, *byte_offset)
+ + NumberToSize(isolate, *byte_length));
+ holder->set_byte_offset(*byte_offset);
+ ASSERT(byte_length->IsNumber());
+ holder->set_byte_length(*byte_length);
+
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetBuffer) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+ return data_view->buffer();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteOffset) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+ return data_view->byte_offset();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteLength) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+ return data_view->byte_length();
+}
+
+
+inline static bool NeedToFlipBytes(bool is_little_endian) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ return !is_little_endian;
+#else
+ return is_little_endian;
+#endif
+}
+
+
+template<int n>
+inline void CopyBytes(uint8_t* target, uint8_t* source) {
+ for (int i = 0; i < n; i++) {
+ *(target++) = *(source++);
+ }
+}
+
+
+template<int n>
+inline void FlipBytes(uint8_t* target, uint8_t* source) {
+ source = source + (n-1);
+ for (int i = 0; i < n; i++) {
+ *(target++) = *(source--);
+ }
+}
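// Worked example for the two helpers above, assuming a little-endian host:
uint8_t bytes[2] = { 0x12, 0x34 };  // big-endian 0x1234 in the buffer
uint16_t value;
FlipBytes<2>(reinterpret_cast<uint8_t*>(&value), bytes);
// value == 0x1234 on a little-endian host; CopyBytes would give 0x3412.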
+
+
+template<typename T>
+inline static bool DataViewGetValue(
+ Isolate* isolate,
+ Handle<JSDataView> data_view,
+ Handle<Object> byte_offset_obj,
+ bool is_little_endian,
+ T* result) {
+ size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+ size_t data_view_byte_offset =
+ NumberToSize(isolate, data_view->byte_offset());
+ size_t data_view_byte_length =
+ NumberToSize(isolate, data_view->byte_length());
+ if (byte_offset + sizeof(T) > data_view_byte_length ||
+ byte_offset + sizeof(T) < byte_offset) { // overflow
+ return false;
+ }
+
+ union Value {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ };
+
+ Value value;
+ size_t buffer_offset = data_view_byte_offset + byte_offset;
+ ASSERT(
+ NumberToSize(isolate, buffer->byte_length())
+ >= buffer_offset + sizeof(T));
+ uint8_t* source =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(value.bytes, source);
+ } else {
+ CopyBytes<sizeof(T)>(value.bytes, source);
+ }
+ *result = value.data;
+ return true;
+}
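// Sketch of the wrap-around guard used above: with size_t arithmetic, an
// offset near SIZE_MAX makes byte_offset + sizeof(T) wrap past zero, so a
// sum smaller than byte_offset itself signals overflow.
size_t byte_offset = static_cast<size_t>(-1) - 1;  // SIZE_MAX - 1
bool overflowed = byte_offset + sizeof(uint32_t) < byte_offset;  // true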
+
+
+template<typename T>
+static bool DataViewSetValue(
+ Isolate* isolate,
+ Handle<JSDataView> data_view,
+ Handle<Object> byte_offset_obj,
+ bool is_little_endian,
+ T data) {
+ size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+ size_t data_view_byte_offset =
+ NumberToSize(isolate, data_view->byte_offset());
+ size_t data_view_byte_length =
+ NumberToSize(isolate, data_view->byte_length());
+ if (byte_offset + sizeof(T) > data_view_byte_length ||
+ byte_offset + sizeof(T) < byte_offset) { // overflow
+ return false;
+ }
+
+ union Value {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ };
+
+ Value value;
+ value.data = data;
+ size_t buffer_offset = data_view_byte_offset + byte_offset;
+ ASSERT(
+ NumberToSize(isolate, buffer->byte_length())
+ >= buffer_offset + sizeof(T));
+ uint8_t* target =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(target, value.bytes);
+ } else {
+ CopyBytes<sizeof(T)>(target, value.bytes);
+ }
+ return true;
+}
+
+
+#define DATA_VIEW_GETTER(TypeName, Type, Converter) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGet##TypeName) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 3); \
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
+ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \
+ Type result; \
+ if (DataViewGetValue( \
+ isolate, holder, offset, is_little_endian, &result)) { \
+ return isolate->heap()->Converter(result); \
+ } else { \
+ return isolate->Throw(*isolate->factory()->NewRangeError( \
+ "invalid_data_view_accessor_offset", \
+ HandleVector<Object>(NULL, 0))); \
+ } \
+ }
+
+DATA_VIEW_GETTER(Uint8, uint8_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int8, int8_t, NumberFromInt32)
+DATA_VIEW_GETTER(Uint16, uint16_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int16, int16_t, NumberFromInt32)
+DATA_VIEW_GETTER(Uint32, uint32_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int32, int32_t, NumberFromInt32)
+DATA_VIEW_GETTER(Float32, float, NumberFromDouble)
+DATA_VIEW_GETTER(Float64, double, NumberFromDouble)
+
+#undef DATA_VIEW_GETTER
+
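// For reference: what DATA_VIEW_GETTER(Uint16, uint16_t, NumberFromUint32)
// expands to, derived mechanically from the macro body above. The Converter
// argument becomes the heap call that boxes the raw value.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetUint16) {
  HandleScope scope(isolate);
  ASSERT(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
  CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1);
  CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2);
  uint16_t result;
  if (DataViewGetValue(isolate, holder, offset, is_little_endian, &result)) {
    return isolate->heap()->NumberFromUint32(result);
  } else {
    return isolate->Throw(*isolate->factory()->NewRangeError(
        "invalid_data_view_accessor_offset", HandleVector<Object>(NULL, 0)));
  }
}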
+#define DATA_VIEW_SETTER(TypeName, Type) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewSet##TypeName) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 4); \
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); \
+ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
+ Type v = static_cast<Type>(value->Number()); \
+ if (DataViewSetValue( \
+ isolate, holder, offset, is_little_endian, v)) { \
+ return isolate->heap()->undefined_value(); \
+ } else { \
+ return isolate->Throw(*isolate->factory()->NewRangeError( \
+ "invalid_data_view_accessor_offset", \
+ HandleVector<Object>(NULL, 0))); \
+ } \
+ }
+
+DATA_VIEW_SETTER(Uint8, uint8_t)
+DATA_VIEW_SETTER(Int8, int8_t)
+DATA_VIEW_SETTER(Uint16, uint16_t)
+DATA_VIEW_SETTER(Int16, int16_t)
+DATA_VIEW_SETTER(Uint32, uint32_t)
+DATA_VIEW_SETTER(Int32, int32_t)
+DATA_VIEW_SETTER(Float32, float)
+DATA_VIEW_SETTER(Float64, double)
+
+#undef DATA_VIEW_SETTER
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -1244,31 +1451,29 @@ static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_CHECKED(Object, prototype, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
if (FLAG_harmony_observation && obj->map()->is_observed()) {
- HandleScope scope(isolate);
- Handle<JSObject> receiver(obj);
- Handle<Object> value(prototype, isolate);
Handle<Object> old_value(
- GetPrototypeSkipHiddenPrototypes(isolate, *receiver), isolate);
+ GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
- MaybeObject* result = receiver->SetPrototype(*value, true);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
+ if (result.is_null()) return Failure::Exception();
Handle<Object> new_value(
- GetPrototypeSkipHiddenPrototypes(isolate, *receiver), isolate);
+ GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(receiver, "prototype",
+ JSObject::EnqueueChangeRecord(obj, "prototype",
isolate->factory()->proto_string(),
old_value);
}
- return *hresult;
+ return *result;
}
- return obj->SetPrototype(prototype, true);
+ Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
+ if (result.is_null()) return Failure::Exception();
+ return *result;
}
@@ -1494,7 +1699,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
Handle<Object> result =
- RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
+ RegExpImpl::Compile(re, pattern, flags);
if (result.is_null()) return Failure::Exception();
return *result;
}
@@ -2594,18 +2799,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
JavaScriptFrameIterator stack_iterator(isolate);
JavaScriptFrame* frame = stack_iterator.frame();
- JSFunction* function = JSFunction::cast(frame->function());
- RUNTIME_ASSERT(function->shared()->is_generator());
- ASSERT_EQ(function, generator_object->function());
+ RUNTIME_ASSERT(JSFunction::cast(frame->function())->shared()->is_generator());
+ ASSERT_EQ(JSFunction::cast(frame->function()), generator_object->function());
+
+ // The caller should have saved the context and continuation already.
+ ASSERT_EQ(generator_object->context(), Context::cast(frame->context()));
+ ASSERT_LT(0, generator_object->continuation());
// We expect there to be at least two values on the operand stack: the return
// value of the yield expression, and the argument to this runtime call.
// Neither of those should be saved.
int operands_count = frame->ComputeOperandsCount();
- ASSERT(operands_count >= 2);
+ ASSERT_GE(operands_count, 2);
operands_count -= 2;
if (operands_count == 0) {
+ // Although it's semantically harmless to call this function with an
+ // operands_count of zero, it is also unnecessary.
ASSERT_EQ(generator_object->operand_stack(),
isolate->heap()->empty_fixed_array());
ASSERT_EQ(generator_object->stack_handler_index(), -1);
@@ -2622,20 +2832,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
generator_object->set_stack_handler_index(stack_handler_index);
}
- // Set continuation down here to avoid side effects if the operand stack
- // allocation fails.
- intptr_t offset = frame->pc() - function->code()->instruction_start();
- ASSERT(offset > 0 && Smi::IsValid(offset));
- generator_object->set_continuation(static_cast<int>(offset));
-
- // It's possible for the context to be other than the initial context even if
- // there is no stack handler active. For example, this is the case in the
- // body of a "with" statement. Therefore we always save the context.
- generator_object->set_context(Context::cast(frame->context()));
-
- // The return value is the hole for a suspend return, and anything else for a
- // resume return.
- return isolate->heap()->the_hole_value();
+ return isolate->heap()->undefined_value();
}
@@ -3402,9 +3599,8 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_space(zone, DELETE_ON_EXIT);
- ZoneList<int> indices(8, zone);
+ ZoneScope zone_scope(isolate->runtime_zone());
+ ZoneList<int> indices(8, zone_scope.zone());
ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -3413,7 +3609,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
int replacement_len = replacement->length();
FindStringIndicesDispatch(
- isolate, *subject, pattern, &indices, 0xffffffff, zone);
+ isolate, *subject, pattern, &indices, 0xffffffff, zone_scope.zone());
int matches = indices.length();
if (matches == 0) return *subject;
@@ -3489,9 +3685,8 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
int subject_length = subject->length();
// CompiledReplacement uses zone allocation.
- Zone* zone = isolate->runtime_zone();
- ZoneScope zonescope(zone, DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement(zone);
+ ZoneScope zone_scope(isolate->runtime_zone());
+ CompiledReplacement compiled_replacement(zone_scope.zone());
bool simple_replace = compiled_replacement.Compile(replacement,
capture_count,
subject_length);
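// A hedged reading of the recurring ZoneScope change in this file: the old
// (zone, DELETE_ON_EXIT) pair is gone, the scope appears to manage the
// zone's lifetime itself, and containers take zone_scope.zone() explicitly.
{
  ZoneScope zone_scope(isolate->runtime_zone());
  ZoneList<int> indices(8, zone_scope.zone());
  indices.Add(42, zone_scope.zone());
}  // zone-allocated storage is released with the scope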
@@ -4024,15 +4219,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
int capture_count = regexp->CaptureCount();
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_space(zone, DELETE_ON_EXIT);
- ZoneList<int> offsets(8, zone);
+ ZoneScope zone_scope(isolate->runtime_zone());
+ ZoneList<int> offsets(8, zone_scope.zone());
while (true) {
int32_t* match = global_cache.FetchNext();
if (match == NULL) break;
- offsets.Add(match[0], zone); // start
- offsets.Add(match[1], zone); // end
+ offsets.Add(match[0], zone_scope.zone()); // start
+ offsets.Add(match[1], zone_scope.zone()); // end
}
if (global_cache.HasException()) return Failure::Exception();
@@ -4514,7 +4708,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
(dictionary->DetailsAt(entry).type() == NORMAL)) {
Object* value = dictionary->ValueAt(entry);
if (!receiver->IsGlobalObject()) return value;
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
if (!value->IsTheHole()) return value;
// If value is the hole do the general lookup.
}
@@ -4754,25 +4948,40 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
}
js_object->ValidateElements();
- Handle<Object> result = JSObject::SetElement(
- js_object, index, value, attr, strict_mode, set_mode);
+ if (js_object->HasExternalArrayElements()) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
+ bool has_exception;
+ Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ if (has_exception) return Failure::Exception();
+ value = number;
+ }
+ }
+ MaybeObject* result = js_object->SetElement(
+ index, *value, attr, strict_mode, true, set_mode);
js_object->ValidateElements();
- if (result.is_null()) return Failure::Exception();
+ if (result->IsFailure()) return result;
return *value;
}
if (key->IsName()) {
- Handle<Object> result;
+ MaybeObject* result;
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- result = JSObject::SetElement(
- js_object, index, value, attr, strict_mode, set_mode);
+ if (js_object->HasExternalArrayElements()) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
+ bool has_exception;
+ Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ if (has_exception) return Failure::Exception();
+ value = number;
+ }
+ }
+ result = js_object->SetElement(
+ index, *value, attr, strict_mode, true, set_mode);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- result = JSReceiver::SetProperty(
- js_object, name, value, attr, strict_mode);
+ result = js_object->SetProperty(*name, *value, attr, strict_mode);
}
- if (result.is_null()) return Failure::Exception();
+ if (result->IsFailure()) return result;
return *value;
}
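// Why ToNumber runs before the raw store above: for external-array elements
// the conversion can call arbitrary user JS, which must happen exactly once
// and before the write. Hypothetical JS trigger:
//   var ta = new Int32Array(1);
//   ta[0] = { valueOf: function() { return 42; } };  // valueOf runs first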
@@ -6102,18 +6311,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
static const int kMaxInitialListCapacity = 16;
- Zone* zone = isolate->runtime_zone();
- ZoneScope scope(zone, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate->runtime_zone());
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity, zone);
+ ZoneList<int> indices(initial_capacity, zone_scope.zone());
if (!pattern->IsFlat()) FlattenString(pattern);
- FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone);
+ FindStringIndicesDispatch(isolate, *subject, *pattern,
+ &indices, limit, zone_scope.zone());
if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length, zone);
+ indices.Add(subject_length, zone_scope.zone());
}
// The list indices now contains the end of each part to create.
@@ -7916,12 +8125,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
ASSERT(V8::UseCrankshaft() && FLAG_parallel_recompilation);
- OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
- do {
- // The function could have been marked for installing, but not queued just
- // yet. In this case, retry until installed.
- opt_thread->InstallOptimizedFunctions();
- } while (function->IsMarkedForInstallingRecompiledCode());
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
return function->code();
}
@@ -8071,6 +8275,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsParallelRecompilationSupported) {
+ HandleScope scope(isolate);
+ return FLAG_parallel_recompilation
+ ? isolate->heap()->true_value() : isolate->heap()->false_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
@@ -8097,13 +8308,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WaitUntilOptimized) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompleteOptimization) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation) {
- if (V8::UseCrankshaft() && function->IsOptimizable()) {
- while (!function->IsOptimized()) OS::Sleep(50);
+ if (FLAG_parallel_recompilation && V8::UseCrankshaft()) {
+  // While the function is in the optimization pipeline, it is marked
+  // accordingly.
+ // Note that if the debugger is activated during parallel recompilation,
+ // the function will be marked with the lazy-recompile builtin, which is
+ // not related to parallel recompilation.
+ while (function->IsMarkedForParallelRecompilation() ||
+ function->IsInRecompileQueue() ||
+ function->IsMarkedForInstallingRecompiledCode()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ OS::Sleep(50);
}
}
return isolate->heap()->undefined_value();
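// Usage sketch, assuming d8 with --allow-natives-syntax; the "parallel"
// hint to %OptimizeFunctionOnNextCall is an assumption here:
//   function f() { return 1; }
//   f(); f();
//   %OptimizeFunctionOnNextCall(f, "parallel");
//   f();                         // queues the parallel recompile
//   %CompleteOptimization(f);    // spins until the code is installed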
@@ -8242,9 +8460,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
*interrupt_code,
*replacement_code);
- // Allow OSR only at nesting level zero again.
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
-
// If the optimization attempt succeeded, return the AST id tagged as a
// smi. This tells the builtin that we need to translate the unoptimized
// frame to an optimized one.
@@ -9104,14 +9319,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Zone* zone = isolate->runtime_zone();
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
if (source->IsSeqOneByteString()) {
- result = JsonParser<true>::Parse(source, zone);
+ result = JsonParser<true>::Parse(source);
} else {
- result = JsonParser<false>::Parse(source, zone);
+ result = JsonParser<false>::Parse(source);
}
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
@@ -11564,6 +11778,58 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
}
+// Returns the list of step-in positions (text offsets) for the function on
+// the given stack frame, in a range from the current debug break position
+// to the end of the corresponding statement.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ // Check arguments.
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ Handle<SharedFunctionInfo> shared =
+ Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+ Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
+
+ int len = 0;
+ Handle<JSArray> array(isolate->factory()->NewJSArray(10));
+ // Find the break point where execution has stopped.
+ BreakLocationIterator break_location_iterator(debug_info,
+ ALL_BREAK_LOCATIONS);
+
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+ int current_statement_pos = break_location_iterator.statement_position();
+
+ while (!break_location_iterator.Done()) {
+ if (break_location_iterator.IsStepInLocation(isolate)) {
+ Smi* position_value = Smi::FromInt(break_location_iterator.position());
+ JSObject::SetElement(array, len,
+ Handle<Object>(position_value, isolate),
+ NONE, kNonStrictMode);
+ len++;
+ }
+ // Advance iterator.
+ break_location_iterator.Next();
+ if (current_statement_pos !=
+ break_location_iterator.statement_position()) {
+ break;
+ }
+ }
+ return *array;
+}
+
+
static const int kScopeDetailsTypeIndex = 0;
static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsSize = 2;
@@ -11835,14 +12101,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
}
+static bool IsPositionAlignmentCodeCorrect(int alignment) {
+ return alignment == STATEMENT_ALIGNED || alignment == BREAK_POSITION_ALIGNED;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
+
+ if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
+ return isolate->ThrowIllegalOperation();
+ }
+ BreakPositionAlignment alignment =
+ static_cast<BreakPositionAlignment>(statement_aligned_code);
+
Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
- Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
+ Handle<Object> break_locations =
+ Debug::GetSourceBreakLocations(shared, alignment);
if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
// Return array as JS array
return *isolate->factory()->NewJSArrayWithElements(
@@ -11875,14 +12155,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
// GetScriptFromScriptData.
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
-// args[2]: number: break point object
+// args[2]: number: break point position alignment
+// args[3]: number: break point object
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
+ CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[2]);
+ Handle<Object> break_point_object_arg = args.at<Object>(3);
+
+ if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
+ return isolate->ThrowIllegalOperation();
+ }
+ BreakPositionAlignment alignment =
+ static_cast<BreakPositionAlignment>(statement_aligned_code);
// Get the script from the script wrapper.
RUNTIME_ASSERT(wrapper->value()->IsScript());
@@ -11890,7 +12178,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
// Set break point.
if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position)) {
+ &source_position,
+ alignment)) {
return isolate->heap()->undefined_value();
}
@@ -12158,6 +12447,15 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
// the same view of the values of parameters and local variables as if the
// piece of JavaScript was evaluated at the point where the function on the
// stack frame is currently stopped when we compile and run the (direct) eval.
+// Returns an array of
+// #0: evaluate result
+// #1: local variables materialized again as object after evaluation,
+//     containing original variable values as they remained on stack
+// #2: local variables materialized as object before evaluation (and
+//     possibly modified by the evaluated expression)
+// Since the user expression only reaches (and modifies) copies of local
+// variables, those copies are returned to the caller to allow tracking the
+// changes and manually updating the actual variables.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
HandleScope scope(isolate);
@@ -12257,7 +12555,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
}
Handle<Object> receiver(frame->receiver(), isolate);
- return DebugEvaluate(isolate, context, context_extension, receiver, source);
+ Object* evaluate_result_object;
+ { MaybeObject* maybe_result =
+ DebugEvaluate(isolate, context, context_extension, receiver, source);
+ if (!maybe_result->ToObject(&evaluate_result_object)) return maybe_result;
+ }
+ Handle<Object> evaluate_result(evaluate_result_object, isolate);
+
+ Handle<JSObject> local_scope_control_copy =
+ MaterializeLocalScopeWithFrameInspector(isolate, frame,
+ &frame_inspector);
+
+ Handle<FixedArray> resultArray = isolate->factory()->NewFixedArray(3);
+ resultArray->set(0, *evaluate_result);
+ resultArray->set(1, *local_scope_control_copy);
+ resultArray->set(2, *local_scope);
+
+ return *(isolate->factory()->NewJSArrayWithElements(resultArray));
}
@@ -12831,8 +13145,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
- isolate->runtime_zone());
+ return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
}
// Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -12880,8 +13193,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
}
if (it.done()) return heap->undefined_value();
- const char* error_message =
- LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
+ const char* error_message = LiveEdit::RestartFrame(it.frame());
if (error_message) {
return *(isolate->factory()->InternalizeUtf8String(error_message));
}
@@ -13091,6 +13403,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedStackTrace) {
CONVERT_ARG_CHECKED(JSObject, error_object, 0);
String* key = isolate->heap()->hidden_stack_trace_string();
Object* result = error_object->GetHiddenProperty(key);
+ if (result->IsTheHole()) result = isolate->heap()->undefined_value();
RUNTIME_ASSERT(result->IsJSArray() ||
result->IsString() ||
result->IsUndefined());
@@ -13479,9 +13792,9 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
MaybeObject* maybe_array;
if (!type_info.is_null() &&
*type_info != isolate->heap()->undefined_value() &&
- JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi() &&
+ Cell::cast(*type_info)->value()->IsSmi() &&
can_use_type_feedback) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
+ Cell* cell = Cell::cast(*type_info);
Smi* smi = Smi::cast(cell->value());
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
if (holey && !IsFastHoleyElementsKind(to_kind)) {
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index ef5401610f..70568f9fa8 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -95,8 +95,9 @@ namespace internal {
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
+ F(IsParallelRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
- F(WaitUntilOptimized, 1, 1) \
+ F(CompleteOptimization, 1, 1) \
F(GetOptimizationStatus, 1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
@@ -368,6 +369,28 @@ namespace internal {
F(TypedArrayGetLength, 1, 1) \
F(TypedArraySetFastCases, 3, 1) \
\
+ F(DataViewInitialize, 4, 1) \
+ F(DataViewGetBuffer, 1, 1) \
+ F(DataViewGetByteLength, 1, 1) \
+ F(DataViewGetByteOffset, 1, 1) \
+ F(DataViewGetInt8, 3, 1) \
+ F(DataViewGetUint8, 3, 1) \
+ F(DataViewGetInt16, 3, 1) \
+ F(DataViewGetUint16, 3, 1) \
+ F(DataViewGetInt32, 3, 1) \
+ F(DataViewGetUint32, 3, 1) \
+ F(DataViewGetFloat32, 3, 1) \
+ F(DataViewGetFloat64, 3, 1) \
+ \
+ F(DataViewSetInt8, 4, 1) \
+ F(DataViewSetUint8, 4, 1) \
+ F(DataViewSetInt16, 4, 1) \
+ F(DataViewSetUint16, 4, 1) \
+ F(DataViewSetInt32, 4, 1) \
+ F(DataViewSetUint32, 4, 1) \
+ F(DataViewSetFloat32, 4, 1) \
+ F(DataViewSetFloat64, 4, 1) \
+ \
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewObject, 1, 1) \
@@ -467,6 +490,7 @@ namespace internal {
F(GetFrameCount, 1, 1) \
F(GetFrameDetails, 2, 1) \
F(GetScopeCount, 2, 1) \
+ F(GetStepInPositions, 2, 1) \
F(GetScopeDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
@@ -475,9 +499,9 @@ namespace internal {
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
F(SetDisableBreak, 1, 1) \
- F(GetBreakLocations, 1, 1) \
+ F(GetBreakLocations, 2, 1) \
F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 3, 1) \
+ F(SetScriptBreakPoint, 4, 1) \
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
@@ -573,7 +597,8 @@ namespace internal {
F(GetCachedArrayIndex, 1, 1) \
F(FastAsciiArrayJoin, 2, 1) \
F(GeneratorNext, 2, 1) \
- F(GeneratorThrow, 2, 1)
+ F(GeneratorThrow, 2, 1) \
+ F(DebugBreakInOptimizedCode, 0, 1)
// ----------------------------------------------------------------------------
@@ -768,6 +793,10 @@ class Runtime : public AllStatic {
Handle<JSArrayBuffer> array_buffer,
size_t allocated_length);
+ static void FreeArrayBuffer(
+ Isolate* isolate,
+ JSArrayBuffer* phantom_array_buffer);
+
// Helper functions used by stubs.
static void PerformGC(Object* result);
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 22f888d814..348fd747f5 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -658,7 +658,6 @@ function DefaultNumber(x) {
throw %MakeTypeError('cannot_convert_to_primitive', []);
}
-
// ECMA-262, section 8.6.2.6, page 28.
function DefaultString(x) {
var toString = x.toString;
@@ -676,6 +675,12 @@ function DefaultString(x) {
throw %MakeTypeError('cannot_convert_to_primitive', []);
}
+function ToPositiveInteger(x, rangeErrorName) {
+ var i = TO_INTEGER(x);
+ if (i < 0) throw %MakeRangeError(rangeErrorName);
+ return i;
+}
+
// NOTE: Setting the prototype for Array must take place as early as
// possible due to code generation for array literals. When
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index efac288ee7..982f252807 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "sampler.h"
+
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
|| defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__) \
|| defined(__native_client__)
@@ -60,6 +62,8 @@
#include "v8.h"
+#include "cpu-profiler.h"
+#include "flags.h"
#include "frames-inl.h"
#include "log.h"
#include "platform.h"
@@ -227,44 +231,36 @@ class Sampler::PlatformData : public PlatformDataCommon {
#endif
-class SampleHelper {
- public:
- inline TickSample* Init(Sampler* sampler, Isolate* isolate) {
#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+ inline bool Init(Sampler* sampler, Isolate* isolate) {
ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
Isolate::PerIsolateThreadData* per_thread_data = isolate->
FindPerThreadDataForThread(thread_id);
- if (!per_thread_data) return NULL;
+ if (!per_thread_data) return false;
simulator_ = per_thread_data->simulator();
- // Check if there is active simulator before allocating TickSample.
- if (!simulator_) return NULL;
-#endif // USE_SIMULATOR
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
- return sample;
+    // Check if there is an active simulator.
+ return simulator_ != NULL;
}
-#if defined(USE_SIMULATOR)
- inline void FillRegisters(TickSample* sample) {
- sample->pc = reinterpret_cast<Address>(simulator_->get_pc());
- sample->sp = reinterpret_cast<Address>(simulator_->get_register(
+ inline void FillRegisters(RegisterState* state) {
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
#if V8_TARGET_ARCH_ARM
- sample->fp = reinterpret_cast<Address>(simulator_->get_register(
+ state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
#elif V8_TARGET_ARCH_MIPS
- sample->fp = reinterpret_cast<Address>(simulator_->get_register(
+ state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
}
-#endif // USE_SIMULATOR
private:
-#if defined(USE_SIMULATOR)
Simulator* simulator_;
-#endif
- TickSample sample_obj;
};
+#endif // USE_SIMULATOR
#if defined(USE_SIGNALS)
@@ -324,89 +320,86 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
- SampleHelper helper;
- TickSample* sample = helper.Init(sampler, isolate);
- if (sample == NULL) return;
+ RegisterState state;
#if defined(USE_SIMULATOR)
- helper.FillRegisters(sample);
+ SimulatorHelper helper;
+ if (!helper.Init(sampler, isolate)) return;
+ helper.FillRegisters(&state);
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
#if defined(__linux__) || defined(__ANDROID__)
#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
(__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
// Old GLibc ARM versions used a gregs[] array to access the register
// values from mcontext_t.
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
- sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
+ state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(mcontext.pc);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
#elif defined(__FreeBSD__)
#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+ state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+ state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
+ state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
#elif defined(__NetBSD__)
#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
+ state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
+ state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
+ state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
+ state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH_*
#elif defined(__OpenBSD__)
USE(mcontext);
#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
+ state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
+ state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
+ state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
+ state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
+ state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
+ state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH_*
#elif defined(__sun)
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
#endif // __sun
#endif // USE_SIMULATOR
-
- sampler->SampleStack(sample);
- sampler->Tick(sample);
+ sampler->SampleStack(state);
#endif // __native_client__
}
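// Shape of the new two-phase flow above, with placeholder addresses:
// platform code captures registers only; the stack walk happens later in
// TickSample::Init, reached through Sampler::SampleStack.
RegisterState state;
state.pc = reinterpret_cast<Address>(0x1000);
state.sp = reinterpret_cast<Address>(0x2000);
state.fp = reinterpret_cast<Address>(0x3000);
sampler->SampleStack(state);  // builds and dispatches the TickSample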
@@ -496,26 +489,25 @@ class SamplerThread : public Thread {
void SampleContext(Sampler* sampler) {
if (!SignalHandler::Installed()) return;
pthread_t tid = sampler->platform_data()->vm_tid();
- int result = pthread_kill(tid, SIGPROF);
- USE(result);
- ASSERT(result == 0);
+ pthread_kill(tid, SIGPROF);
}
#elif defined(__MACH__)
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
- Isolate* isolate = sampler->isolate();
- SampleHelper helper;
- TickSample* sample = helper.Init(sampler, isolate);
- if (sample == NULL) return;
+#if defined(USE_SIMULATOR)
+ SimulatorHelper helper;
+ Isolate* isolate = sampler->isolate();
+ if (!helper.Init(sampler, isolate)) return;
+#endif
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
#if V8_HOST_ARCH_X64
thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
+ x86_thread_state64_t thread_state;
mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
@@ -524,7 +516,7 @@ class SamplerThread : public Thread {
#endif // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
+ i386_thread_state_t thread_state;
mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
@@ -537,19 +529,18 @@ class SamplerThread : public Thread {
if (thread_get_state(profiled_thread,
flavor,
- reinterpret_cast<natural_t*>(&state),
+ reinterpret_cast<natural_t*>(&thread_state),
&count) == KERN_SUCCESS) {
- sample->state = isolate->current_vm_state();
+ RegisterState state;
#if defined(USE_SIMULATOR)
- helper.FillRegisters(sample);
+ helper.FillRegisters(&state);
#else
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+ state.pc = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(ip));
+ state.sp = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(sp));
+ state.fp = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(bp));
#endif // USE_SIMULATOR
#undef REGISTER_FIELD
- sampler->SampleStack(sample);
- sampler->Tick(sample);
+ sampler->SampleStack(state);
}
thread_resume(profiled_thread);
}
@@ -561,34 +552,34 @@ class SamplerThread : public Thread {
if (profiled_thread == NULL) return;
Isolate* isolate = sampler->isolate();
- SampleHelper helper;
- TickSample* sample = helper.Init(sampler, isolate);
- if (sample == NULL) return;
+#if defined(USE_SIMULATOR)
+ SimulatorHelper helper;
+ if (!helper.Init(sampler, isolate)) return;
+#endif
const DWORD kSuspendFailed = static_cast<DWORD>(-1);
if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = isolate->current_vm_state();
// Context used for sampling the register state of the profiled thread.
CONTEXT context;
memset(&context, 0, sizeof(context));
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread, &context) != 0) {
+ RegisterState state;
#if defined(USE_SIMULATOR)
- helper.FillRegisters(sample);
+ helper.FillRegisters(&state);
#else
#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
+ state.pc = reinterpret_cast<Address>(context.Rip);
+ state.sp = reinterpret_cast<Address>(context.Rsp);
+ state.fp = reinterpret_cast<Address>(context.Rbp);
#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
+ state.pc = reinterpret_cast<Address>(context.Eip);
+ state.sp = reinterpret_cast<Address>(context.Esp);
+ state.fp = reinterpret_cast<Address>(context.Ebp);
#endif
#endif // USE_SIMULATOR
- sampler->SampleStack(sample);
- sampler->Tick(sample);
+ sampler->SampleStack(state);
}
ResumeThread(profiled_thread);
}
@@ -614,8 +605,11 @@ SamplerThread* SamplerThread::instance_ = NULL;
//
// StackTracer implementation
//
-DISABLE_ASAN void TickSample::Trace(Isolate* isolate) {
+DISABLE_ASAN void TickSample::Init(Isolate* isolate,
+ const RegisterState& regs) {
ASSERT(isolate->IsInitialized());
+ pc = regs.pc;
+ state = isolate->current_vm_state();
// Avoid collecting traces while doing GC.
if (state == GC) return;
@@ -634,11 +628,12 @@ DISABLE_ASAN void TickSample::Trace(Isolate* isolate) {
} else {
// Sample potential return address value for frameless invocation of
// stubs (we'll figure out later if this value makes sense).
- tos = Memory::Address_at(sp);
+ tos = Memory::Address_at(regs.sp);
has_external_callback = false;
}
- SafeStackTraceFrameIterator it(isolate, fp, sp, sp, js_entry_sp);
+ SafeStackFrameIterator it(isolate, regs.fp, regs.sp, js_entry_sp);
+ top_frame_type = it.top_frame_type();
int i = 0;
while (!it.done() && i < TickSample::kMaxFramesCount) {
stack[i++] = it.frame()->pc();
@@ -686,9 +681,13 @@ void Sampler::Stop() {
SetActive(false);
}
-void Sampler::SampleStack(TickSample* sample) {
- sample->Trace(isolate_);
+void Sampler::SampleStack(const RegisterState& state) {
+ TickSample* sample = isolate_->cpu_profiler()->TickSampleEvent();
+ TickSample sample_obj;
+ if (sample == NULL) sample = &sample_obj;
+ sample->Init(isolate_, state);
if (++samples_taken_ < 0) samples_taken_ = 0;
+ Tick(sample);
}
} } // namespace v8::internal
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h
index 1d9ac8723b..a47a3635dd 100644
--- a/deps/v8/src/sampler.h
+++ b/deps/v8/src/sampler.h
@@ -29,6 +29,7 @@
#define V8_SAMPLER_H_
#include "atomicops.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -43,21 +44,25 @@ class Isolate;
// (if used for profiling) the program counter and stack pointer for
// the thread that created it.
+struct RegisterState {
+ RegisterState() : pc(NULL), sp(NULL), fp(NULL) {}
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+};
+
// TickSample captures the information collected for each sample.
struct TickSample {
TickSample()
: state(OTHER),
pc(NULL),
- sp(NULL),
- fp(NULL),
external_callback(NULL),
frames_count(0),
- has_external_callback(false) {}
- void Trace(Isolate* isolate);
+ has_external_callback(false),
+ top_frame_type(StackFrame::NONE) {}
+ void Init(Isolate* isolate, const RegisterState& state);
StateTag state; // The state of the VM.
Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
union {
Address tos; // Top stack value (*sp).
Address external_callback;
@@ -66,6 +71,7 @@ struct TickSample {
Address stack[kMaxFramesCount]; // Call stack.
int frames_count : 8; // Number of captured frames.
bool has_external_callback : 1;
+ StackFrame::Type top_frame_type : 4;
};
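// A hypothetical compile-time guard for the 4-bit field above, assuming a
// StackFrame::NUMBER_OF_TYPES enumerator exists:
// STATIC_ASSERT(StackFrame::NUMBER_OF_TYPES <= (1 << 4));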
class Sampler {
@@ -82,11 +88,7 @@ class Sampler {
int interval() const { return interval_; }
// Performs stack sampling.
- void SampleStack(TickSample* sample);
-
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
+ void SampleStack(const RegisterState& regs);
// Start and stop sampler.
void Start();
@@ -107,6 +109,11 @@ class Sampler {
class PlatformData;
PlatformData* platform_data() const { return data_; }
+ protected:
+ // This method is called for each sampling period with the current
+ // program counter.
+ virtual void Tick(TickSample* sample) = 0;
+
private:
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 6c5ccea817..4e51cd396b 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -687,6 +687,8 @@ void Deserializer::Deserialize() {
isolate_->heap()->set_native_contexts_list(
isolate_->heap()->undefined_value());
+ isolate_->heap()->set_array_buffers_list(
+ isolate_->heap()->undefined_value());
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
@@ -776,6 +778,7 @@ void Deserializer::ReadChunk(Object** current,
bool write_barrier_needed = (current_object_address != NULL &&
source_space != NEW_SPACE &&
source_space != CELL_SPACE &&
+ source_space != PROPERTY_CELL_SPACE &&
source_space != CODE_SPACE &&
source_space != OLD_DATA_SPACE);
while (current < limit) {
@@ -836,8 +839,7 @@ void Deserializer::ReadChunk(Object** current,
new_code_object->instruction_start()); \
} else { \
ASSERT(space_number == CODE_SPACE); \
- JSGlobalPropertyCell* cell = \
- JSGlobalPropertyCell::cast(new_object); \
+ Cell* cell = Cell::cast(new_object); \
new_object = reinterpret_cast<Object*>( \
cell->ValueAddress()); \
} \
@@ -877,6 +879,7 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
+ CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
@@ -1564,10 +1567,9 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
}
-void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(rinfo->target_cell());
+void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = Cell::cast(rinfo->target_cell());
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 1b56a882f6..a6099afc23 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -519,7 +519,7 @@ class Serializer : public SerializerDeserializer {
void VisitExternalReference(RelocInfo* rinfo);
void VisitCodeTarget(RelocInfo* target);
void VisitCodeEntry(Address entry_address);
- void VisitGlobalPropertyCell(RelocInfo* rinfo);
+ void VisitCell(RelocInfo* rinfo);
void VisitRuntimeEntry(RelocInfo* reloc);
// Used for serializing the external strings that hold the natives source.
void VisitExternalAsciiString(
@@ -613,7 +613,7 @@ class PartialSerializer : public Serializer {
// unique ID, and deserializing several partial snapshots containing script
// would cause dupes.
ASSERT(!o->IsScript());
- return o->IsString() || o->IsSharedFunctionInfo() ||
+ return o->IsName() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
o->IsScopeInfo() ||
o->map() == HEAP->fixed_cow_array_map();
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index a8806f053f..576269df9e 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -45,7 +45,8 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
CHECK_NE(NULL, fp);
- int new_size, pointer_size, data_size, code_size, map_size, cell_size;
+ int new_size, pointer_size, data_size, code_size, map_size, cell_size,
+ property_cell_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
@@ -57,6 +58,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
+ CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size));
#ifdef _MSC_VER
#undef fscanf
#endif
@@ -67,6 +69,8 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
deserializer->set_reservation(CODE_SPACE, code_size);
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
+ deserializer->set_reservation(PROPERTY_CELL_SPACE,
+ property_cell_size);
name.Dispose();
}
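// Hypothetical contents of the <snapshot>.size file parsed above; field
// names for the first three lines are inferred, and all numbers are made up:
//   new 4194304
//   pointer 524288
//   data 262144
//   code 1048576
//   map 131072
//   cell 65536
//   property cell 32768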
@@ -78,6 +82,8 @@ void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
+ deserializer->set_reservation(PROPERTY_CELL_SPACE,
+ property_cell_space_used_);
}
@@ -124,6 +130,8 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
+ deserializer.set_reservation(PROPERTY_CELL_SPACE,
+ context_property_cell_space_used_);
deserializer.DeserializePartial(&root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot-empty.cc
index 70e7ab815c..54236d82ec 100644
--- a/deps/v8/src/snapshot-empty.cc
+++ b/deps/v8/src/snapshot-empty.cc
@@ -49,6 +49,7 @@ const int Snapshot::data_space_used_ = 0;
const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
+const int Snapshot::property_cell_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
@@ -56,5 +57,6 @@ const int Snapshot::context_data_space_used_ = 0;
const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
+const int Snapshot::context_property_cell_space_used_ = 0;
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index c4ae45eee0..149306e442 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -77,12 +77,14 @@ class Snapshot {
static const int code_space_used_;
static const int map_space_used_;
static const int cell_space_used_;
+ static const int property_cell_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
static const int context_code_space_used_;
static const int context_map_space_used_;
static const int context_cell_space_used_;
+ static const int context_property_cell_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 099ad93a1e..15381eaf32 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -72,6 +72,7 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
owner == page->heap()->old_data_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->cell_space() ||
+ owner == page->heap()->property_cell_space() ||
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner),
page->area_start(),
@@ -1043,6 +1044,9 @@ intptr_t PagedSpace::SizeOfFirstPage() {
case CELL_SPACE:
size = 16 * kPointerSize * KB;
break;
+ case PROPERTY_CELL_SPACE:
+ size = 8 * kPointerSize * KB;
+ break;
case CODE_SPACE:
if (heap()->isolate()->code_range()->exists()) {
// When code range exists, code pages are allocated in a special way
@@ -1786,49 +1790,20 @@ static void ClearHistograms() {
}
-static void ClearCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
+static void ClearCodeKindStatistics(int* code_kind_statistics) {
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- isolate->code_kind_statistics()[i] = 0;
+ code_kind_statistics[i] = 0;
}
}
-static void ReportCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
- const char* table[Code::NUMBER_OF_KINDS] = { NULL };
-
-#define CASE(name) \
- case Code::name: table[Code::name] = #name; \
- break
-
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- switch (static_cast<Code::Kind>(i)) {
- CASE(FUNCTION);
- CASE(OPTIMIZED_FUNCTION);
- CASE(STUB);
- CASE(BUILTIN);
- CASE(LOAD_IC);
- CASE(KEYED_LOAD_IC);
- CASE(STORE_IC);
- CASE(KEYED_STORE_IC);
- CASE(CALL_IC);
- CASE(KEYED_CALL_IC);
- CASE(UNARY_OP_IC);
- CASE(BINARY_OP_IC);
- CASE(COMPARE_IC);
- CASE(COMPARE_NIL_IC);
- CASE(TO_BOOLEAN_IC);
- }
- }
-
-#undef CASE
-
+static void ReportCodeKindStatistics(int* code_kind_statistics) {
PrintF("\n Code kind histograms: \n");
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (isolate->code_kind_statistics()[i] > 0) {
- PrintF(" %-20s: %10d bytes\n", table[i],
- isolate->code_kind_statistics()[i]);
+ if (code_kind_statistics[i] > 0) {
+ PrintF(" %-20s: %10d bytes\n",
+ Code::Kind2String(static_cast<Code::Kind>(i)),
+ code_kind_statistics[i]);
}
}
PrintF("\n");
@@ -1836,7 +1811,7 @@ static void ReportCodeKindStatistics() {
static int CollectHistogramInfo(HeapObject* obj) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = obj->GetIsolate();
InstanceType type = obj->map()->instance_type();
ASSERT(0 <= type && type <= LAST_TYPE);
ASSERT(isolate->heap_histograms()[type].name() != NULL);
@@ -2114,13 +2089,13 @@ FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
while (node != NULL &&
Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= node->Size();
+ available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
node = node->next();
}
if (node != NULL) {
set_top(node->next());
- *node_size = node->Size();
+ *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
available_ -= *node_size;
} else {
set_top(NULL);
@@ -2134,6 +2109,18 @@ FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
}
+FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
+ int *node_size) {
+ FreeListNode* node = PickNodeFromList(node_size);
+ if (node != NULL && *node_size < size_in_bytes) {
+ Free(node, *node_size);
+ *node_size = 0;
+ return NULL;
+ }
+ return node;
+}
+
+
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
node->set_next(top_);
top_ = node;
@@ -2223,8 +2210,10 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
if (size_in_bytes <= kSmallAllocationMax) {
node = small_list_.PickNodeFromList(node_size);
if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
page->add_available_in_small_free_list(-(*node_size));
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
return node;
}
}
@@ -2232,8 +2221,10 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
if (size_in_bytes <= kMediumAllocationMax) {
node = medium_list_.PickNodeFromList(node_size);
if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
page->add_available_in_medium_free_list(-(*node_size));
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
return node;
}
}
@@ -2241,8 +2232,10 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
if (size_in_bytes <= kLargeAllocationMax) {
node = large_list_.PickNodeFromList(node_size);
if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
page->add_available_in_large_free_list(-(*node_size));
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
return node;
}
}
@@ -2285,10 +2278,37 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
-
huge_list_.set_available(huge_list_available);
- ASSERT(IsVeryLong() || available() == SumFreeLists());
+ if (node != NULL) {
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return node;
+ }
+
+ if (size_in_bytes <= kSmallListMax) {
+ node = small_list_.PickNodeFromList(size_in_bytes, node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_small_free_list(-(*node_size));
+ }
+ } else if (size_in_bytes <= kMediumListMax) {
+ node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_medium_free_list(-(*node_size));
+ }
+ } else if (size_in_bytes <= kLargeListMax) {
+ node = large_list_.PickNodeFromList(size_in_bytes, node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ }
+ }
+
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
return node;
}
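
Two things change in the free-list search above: PickNodeFromList gains a size-checked overload that puts a too-small head node back on the list instead of returning it, and FindNodeFor gains a second pass that retries each size category with that overload before giving up. A self-contained sketch of the refund behaviour, with simplified stand-in types (the real categories also track per-page availability and skip evacuation candidates):

#include <cassert>
#include <cstddef>

// Simplified stand-ins for v8's FreeListNode/FreeListCategory.
struct FreeNode { FreeNode* next; int size; };

struct FreeCategory {
  FreeNode* top = nullptr;
  int available = 0;

  void Free(FreeNode* node, int size) {
    node->next = top;
    top = node;
    available += size;
  }

  // Unchecked variant: pop the head node, whatever its size.
  FreeNode* Pick(int* node_size) {
    FreeNode* node = top;
    if (node == nullptr) return nullptr;
    top = node->next;
    *node_size = node->size;
    available -= *node_size;
    return node;
  }

  // Checked variant added by the patch: refund a too-small head node.
  FreeNode* Pick(int size_in_bytes, int* node_size) {
    FreeNode* node = Pick(node_size);
    if (node != nullptr && *node_size < size_in_bytes) {
      Free(node, *node_size);
      *node_size = 0;
      return nullptr;
    }
    return node;
  }
};

int main() {
  FreeNode small_node = {nullptr, 32};
  FreeCategory list;
  list.Free(&small_node, small_node.size);
  int node_size = 0;
  // A 64-byte request no longer walks away with a 32-byte node.
  assert(list.Pick(64, &node_size) == nullptr);
  assert(list.available == 32);  // the node went back on the list
  // A fitting request still succeeds.
  assert(list.Pick(16, &node_size) != nullptr && node_size == 32);
  return 0;
}
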
@@ -2665,7 +2685,7 @@ void PagedSpace::ReportCodeStatistics() {
Isolate* isolate = Isolate::Current();
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
- ReportCodeKindStatistics();
+ ReportCodeKindStatistics(isolate->code_kind_statistics());
PrintF("Code comment statistics (\" [ comment-txt : size/ "
"count (average)\"):\n");
for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
@@ -2683,7 +2703,7 @@ void PagedSpace::ResetCodeStatistics() {
Isolate* isolate = Isolate::Current();
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
- ClearCodeKindStatistics();
+ ClearCodeKindStatistics(isolate->code_kind_statistics());
for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
comments_statistics[i].Clear();
}
@@ -2834,14 +2854,21 @@ void MapSpace::VerifyObject(HeapObject* object) {
// -----------------------------------------------------------------------------
-// GlobalPropertyCellSpace implementation
+// CellSpace and PropertyCellSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a cell or a free-list node.
- CHECK(object->IsJSGlobalPropertyCell() ||
+ CHECK(object->IsCell() ||
+ object->map() == heap()->two_pointer_filler_map());
+}
+
+
+void PropertyCellSpace::VerifyObject(HeapObject* object) {
+ // The object should be a global object property cell or a free-list node.
+ CHECK(object->IsPropertyCell() ||
object->map() == heap()->two_pointer_filler_map());
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index e7e4d529fc..dda55919c4 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -32,6 +32,7 @@
#include "hashmap.h"
#include "list.h"
#include "log.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
@@ -1454,6 +1455,7 @@ class FreeListCategory {
void Free(FreeListNode* node, int size_in_bytes);
FreeListNode* PickNodeFromList(int *node_size);
+ FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
intptr_t EvictFreeListItemsInList(Page* p);
@@ -2626,20 +2628,20 @@ class MapSpace : public FixedSpace {
// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
+// Old space for simple property cell objects
class CellSpace : public FixedSpace {
public:
// Creates a cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize)
+ : FixedSpace(heap, max_capacity, id, Cell::kSize)
{}
virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
- return RoundDown(size, JSGlobalPropertyCell::kSize);
+ if (IsPowerOf2(Cell::kSize)) {
+ return RoundDown(size, Cell::kSize);
} else {
- return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+ return (size / Cell::kSize) * Cell::kSize;
}
}
@@ -2652,6 +2654,33 @@ class CellSpace : public FixedSpace {
// -----------------------------------------------------------------------------
+// Old space for all global object property cell objects
+
+class PropertyCellSpace : public FixedSpace {
+ public:
+ // Creates a property cell space object with a maximum capacity.
+ PropertyCellSpace(Heap* heap, intptr_t max_capacity,
+ AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, PropertyCell::kSize)
+ {}
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (IsPowerOf2(PropertyCell::kSize)) {
+ return RoundDown(size, PropertyCell::kSize);
+ } else {
+ return (size / PropertyCell::kSize) * PropertyCell::kSize;
+ }
+ }
+
+ protected:
+ virtual void VerifyObject(HeapObject* obj);
+
+ public:
+ TRACK_MEMORY("PropertyCellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
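
The new PropertyCellSpace duplicates CellSpace's RoundSizeDownToObjectAlignment with PropertyCell::kSize as the step. The helper's two branches, reduced to a runnable illustration with arbitrary sizes:

#include <cassert>

bool IsPowerOf2(int x) { return x > 0 && (x & (x - 1)) == 0; }
int RoundDown(int size, int step) { return size & ~(step - 1); }

int RoundSizeDownToObjectAlignment(int size, int object_size) {
  if (IsPowerOf2(object_size)) {
    return RoundDown(size, object_size);  // cheap mask for power-of-two sizes
  } else {
    return (size / object_size) * object_size;  // general truncation
  }
}

int main() {
  assert(RoundSizeDownToObjectAlignment(100, 16) == 96);  // power-of-two step
  assert(RoundSizeDownToObjectAlignment(100, 24) == 96);  // non-power-of-two
  return 0;
}
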
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index c650f57ccc..0386280de6 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -121,6 +121,7 @@ void StoreBuffer::TearDown() {
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->Compact();
+ isolate->counters()->store_buffer_overflows()->Increment();
}
@@ -142,6 +143,11 @@ void StoreBuffer::Uniq() {
}
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+ return old_limit_ - old_top_ >= space_needed;
+}
+
+
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
while (old_limit_ - old_top_ < space_needed &&
old_limit_ < old_reserved_limit_) {
@@ -152,7 +158,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
old_limit_ += grow;
}
- if (old_limit_ - old_top_ >= space_needed) return;
+ if (SpaceAvailable(space_needed)) return;
if (old_buffer_is_filtered_) return;
ASSERT(may_move_store_buffer_entries_);
@@ -171,9 +177,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
Filter(MemoryChunk::SCAN_ON_SCAVENGE);
}
- // If filtering out the entries from scan_on_scavenge pages got us down to
- // less than half full, then we are satisfied with that.
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+ if (SpaceAvailable(space_needed)) return;
// Sample 1 entry in 97 and filter out the pages where we estimate that more
// than 1 in 8 pointers are to new space.
@@ -192,7 +196,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
// As a last resort we mark all pages as being exempt from the store buffer.
ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+ if (SpaceAvailable(space_needed)) return;
}
UNREACHABLE();
}
@@ -294,7 +298,7 @@ bool StoreBuffer::PrepareForIteration() {
void StoreBuffer::Clean() {
ClearFilteringHashSets();
Uniq(); // Also removes things that no longer point to new space.
- CheckForFullBuffer();
+ EnsureSpace(kStoreBufferSize / 2);
}
@@ -687,12 +691,6 @@ void StoreBuffer::Compact() {
ASSERT(old_top_ <= old_limit_);
}
heap_->isolate()->counters()->store_buffer_compactions()->Increment();
- CheckForFullBuffer();
-}
-
-
-void StoreBuffer::CheckForFullBuffer() {
- EnsureSpace(kStoreBufferSize * 2);
}
} } // namespace v8::internal
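
The net effect of the store-buffer change: every fallback stage in EnsureSpace now re-tests the single SpaceAvailable predicate (the later stages previously compared against "half full"), and Compact no longer re-enters EnsureSpace through CheckForFullBuffer. A condensed stand-in for the control flow; the stage bodies below are placeholders, only the shape matches:

#include <cstdint>

struct StoreBufferSketch {  // illustrative, not v8's class
  intptr_t old_top_ = 0;
  intptr_t old_limit_ = 1024;
  intptr_t old_reserved_limit_ = 4096;

  bool SpaceAvailable(intptr_t space_needed) {
    return old_limit_ - old_top_ >= space_needed;
  }

  void Grow() { old_limit_ += 1024; }             // stage 1: enlarge buffer
  void FilterScanOnScavenge() { old_top_ /= 2; }  // stage 2 (stand-in effect)
  void ExemptPopularPages() { old_top_ = 0; }     // stage 3, last resort

  void EnsureSpace(intptr_t space_needed) {
    while (!SpaceAvailable(space_needed) && old_limit_ < old_reserved_limit_) {
      Grow();
    }
    if (SpaceAvailable(space_needed)) return;
    FilterScanOnScavenge();
    if (SpaceAvailable(space_needed)) return;  // uniform re-test, as above
    ExemptPopularPages();
  }
};

int main() {
  StoreBufferSketch sb;
  sb.old_top_ = 1000;
  sb.EnsureSpace(512);
  return sb.SpaceAvailable(512) ? 0 : 1;
}
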
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
index 514534a1ed..520cbc0162 100644
--- a/deps/v8/src/store-buffer.h
+++ b/deps/v8/src/store-buffer.h
@@ -160,7 +160,7 @@ class StoreBuffer {
void ClearFilteringHashSets();
- void CheckForFullBuffer();
+ bool SpaceAvailable(intptr_t space_needed);
void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
@@ -223,7 +223,6 @@ class StoreBufferRebuildScope {
~StoreBufferRebuildScope() {
store_buffer_->callback_ = stored_callback_;
store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
- store_buffer_->CheckForFullBuffer();
}
private:
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 109622567a..9c4394ed7f 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -471,7 +471,7 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
}
JSFunction* fun = JSFunction::cast(f);
- Object* perhaps_context = fun->unchecked_context();
+ Object* perhaps_context = fun->context();
if (perhaps_context->IsHeapObject() &&
heap->Contains(HeapObject::cast(perhaps_context)) &&
perhaps_context->IsContext()) {
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 44315bba16..7e186871ba 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -495,8 +495,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
}
}
}
- var resultBuilder = new ReplaceResultBuilder(subject, res);
- var result = resultBuilder.generate();
+ var result = %StringBuilderConcat(res, res.length, subject);
resultArray.length = 0;
reusableReplaceArray = resultArray;
return result;
@@ -645,6 +644,8 @@ function StringSplit(separator, limit) {
}
+var ArrayPushBuiltin = $Array.prototype.push;
+
function StringSplitOnRegExp(subject, separator, limit, length) {
%_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
@@ -664,13 +665,15 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
while (true) {
if (startIndex === length) {
- result.push(%_SubString(subject, currentIndex, length));
+ %_CallFunction(result, %_SubString(subject, currentIndex, length),
+ ArrayPushBuiltin);
break;
}
var matchInfo = DoRegExpExec(separator, subject, startIndex);
if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result.push(%_SubString(subject, currentIndex, length));
+ %_CallFunction(result, %_SubString(subject, currentIndex, length),
+ ArrayPushBuiltin);
break;
}
var endIndex = matchInfo[CAPTURE1];
@@ -681,7 +684,8 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
continue;
}
- result.push(%_SubString(subject, currentIndex, startMatch));
+ %_CallFunction(result, %_SubString(subject, currentIndex, startMatch),
+ ArrayPushBuiltin);
if (result.length === limit) break;
@@ -690,9 +694,10 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
var start = matchInfo[i++];
var end = matchInfo[i++];
if (end != -1) {
- result.push(%_SubString(subject, start, end));
+ %_CallFunction(result, %_SubString(subject, start, end),
+ ArrayPushBuiltin);
} else {
- result.push(void 0);
+ %_CallFunction(result, void 0, ArrayPushBuiltin);
}
if (result.length === limit) break outer_loop;
}
@@ -950,43 +955,6 @@ function StringSup() {
return "<sup>" + this + "</sup>";
}
-
-// ReplaceResultBuilder support.
-function ReplaceResultBuilder(str) {
- if (%_ArgumentsLength() > 1) {
- this.elements = %_Arguments(1);
- } else {
- this.elements = new InternalArray();
- }
- this.special_string = str;
-}
-
-SetUpLockedPrototype(ReplaceResultBuilder,
- $Array("elements", "special_string"), $Array(
- "add", function(str) {
- str = TO_STRING_INLINE(str);
- if (str.length > 0) this.elements.push(str);
- },
- "addSpecialSlice", function(start, end) {
- var len = end - start;
- if (start < 0 || len <= 0) return;
- if (start < 0x80000 && len < 0x800) {
- this.elements.push((start << 11) | len);
- } else {
- // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
- // so -len is a smi.
- var elements = this.elements;
- elements.push(-len);
- elements.push(start);
- }
- },
- "generate", function() {
- var elements = this.elements;
- return %StringBuilderConcat(elements, elements.length, this.special_string);
- }
-));
-
-
// -------------------------------------------------------------------
function SetUpString() {
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index dfe2fb7359..a1774b6e1f 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -175,7 +175,7 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
-#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) \
+#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) \
&& !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
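
The one-line strtod.cc change tests the arch macro's value instead of its existence, which assumes the build defines V8_TARGET_ARCH_IA32 to 1 on that target. The difference between the two forms, shown with invented macro names:

// FEATURE_OFF mirrors a macro defined to zero; FEATURE_UNSET is not
// defined at all (undefined names evaluate to 0 inside #if).
#define FEATURE_ON 1
#define FEATURE_OFF 0

#if defined(FEATURE_OFF)
// Taken: defined() only asks whether the name exists, not its value.
#endif

#if FEATURE_OFF
#error "never taken: the value 0 is false"
#endif

#if FEATURE_ON && !FEATURE_UNSET
// Taken: this is the form the patch moves to.
#endif

int main() { return 0; }
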
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 0f81669960..62ac2c873b 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "ast.h"
#include "code-stubs.h"
+#include "cpu-profiler.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
@@ -43,7 +44,7 @@ namespace internal {
// StubCache implementation.
-StubCache::StubCache(Isolate* isolate, Zone* zone)
+StubCache::StubCache(Isolate* isolate)
: isolate_(isolate) {
ASSERT(isolate == Isolate::Current());
}
@@ -321,7 +322,7 @@ Handle<Code> StubCache::ComputeLoadNormal(Handle<Name> name,
Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
Handle<JSObject> receiver,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
bool is_dont_delete) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
Handle<Code> stub = FindIC(name, stub_holder, Code::LOAD_IC, Code::NORMAL);
@@ -497,7 +498,7 @@ Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> receiver,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, Handle<JSObject>::cast(receiver),
@@ -644,7 +645,10 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+
+ if (CallStubCompiler::CanBeCached(function)) {
+ JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ }
return code;
}
@@ -734,7 +738,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
Handle<Name> name,
Handle<JSObject> receiver,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function) {
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*receiver, *holder);
@@ -753,7 +757,9 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ if (CallStubCompiler::CanBeCached(function)) {
+ JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ }
return code;
}
@@ -1108,12 +1114,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
PropertyCallbackArguments
custom_args(isolate, callback->data(), recv, recv);
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, setter_address);
- custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
- }
+ custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
@@ -1159,12 +1160,8 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
{
// Use the interceptor getter.
HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- r = callback_args.Call(getter, v8::Utils::ToLocal(name));
- }
+ v8::Handle<v8::Value> r =
+ callback_args.Call(getter, v8::Utils::ToLocal(name));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
Handle<Object> result = v8::Utils::OpenHandle(*r);
@@ -1229,12 +1226,8 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
{
// Use the interceptor getter.
HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- r = callback_args.Call(getter, v8::Utils::ToLocal(name));
- }
+ v8::Handle<v8::Value> r =
+ callback_args.Call(getter, v8::Utils::ToLocal(name));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
*attrs = NONE;
@@ -1974,12 +1967,25 @@ bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
}
+bool CallStubCompiler::CanBeCached(Handle<JSFunction> function) {
+ if (function->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name) if (id == k##name) return false;
+ SITE_SPECIFIC_CALL_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+ }
+
+ return true;
+}
+
+
Handle<Code> CallStubCompiler::CompileCustomCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> fname) {
+ Handle<String> fname,
+ Code::StubType type) {
ASSERT(HasCustomCallGenerator(function));
if (function->shared()->HasBuiltinFunctionId()) {
@@ -1990,7 +1996,8 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
holder, \
cell, \
function, \
- fname); \
+ fname, \
+ type); \
}
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
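
CanBeCached uses the same X-macro idiom as the surrounding generators: SITE_SPECIFIC_CALL_GENERATORS (declared in stub-cache.h below) expands into one early return per builtin whose compiled call embeds per-call-site state, here the Array constructor, presumably because its stub carries allocation-site feedback that must not be shared through the map code cache. A compilable reduction with invented enum values:

enum BuiltinFunctionId { kArrayCode, kMathFloor, kMathAbs };  // invented

// Builtins whose generated call code is site-specific and therefore
// must not be reused from the cache.
#define SITE_SPECIFIC_CALL_GENERATORS(V) V(ArrayCode)

bool CanBeCached(BuiltinFunctionId id) {
#define CALL_GENERATOR_CASE(name) if (id == k##name) return false;
  SITE_SPECIFIC_CALL_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
  return true;
}

int main() {
  return (!CanBeCached(kArrayCode) && CanBeCached(kMathAbs)) ? 0 : 1;
}
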
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index a1b55d8d11..6d70d3477d 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -140,7 +140,7 @@ class StubCache {
Handle<Code> ComputeLoadGlobal(Handle<Name> name,
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
bool is_dont_delete);
// ---
@@ -183,7 +183,7 @@ class StubCache {
Handle<Code> ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
StrictModeFlag strict_mode);
Handle<Code> ComputeStoreCallback(Handle<Name> name,
@@ -251,7 +251,7 @@ class StubCache {
Handle<Name> name,
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function);
// ---
@@ -367,7 +367,7 @@ class StubCache {
Factory* factory() { return isolate()->factory(); }
private:
- StubCache(Isolate* isolate, Zone* zone);
+ explicit StubCache(Isolate* isolate);
Handle<Code> ComputeCallInitialize(int argc,
RelocInfo::Mode mode,
@@ -765,7 +765,7 @@ class LoadStubCompiler: public BaseLoadStubCompiler {
Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete);
@@ -890,7 +890,7 @@ class StoreStubCompiler: public BaseStoreStubCompiler {
Handle<Name> name);
Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> holder,
+ Handle<PropertyCell> holder,
Handle<Name> name);
private:
@@ -969,7 +969,12 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
V(StringCharAt) \
V(StringFromCharCode) \
V(MathFloor) \
- V(MathAbs)
+ V(MathAbs) \
+ V(ArrayCode)
+
+
+#define SITE_SPECIFIC_CALL_GENERATORS(V) \
+ V(ArrayCode)
class CallOptimization;
@@ -1007,11 +1012,12 @@ class CallStubCompiler: public StubCompiler {
Handle<Code> CompileCallGlobal(Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name);
static bool HasCustomCallGenerator(Handle<JSFunction> function);
+ static bool CanBeCached(Handle<JSFunction> function);
private:
// Compiles a custom call constant/global IC. For constant calls cell is
@@ -1019,23 +1025,25 @@ class CallStubCompiler: public StubCompiler {
// given function.
Handle<Code> CompileCustomCall(Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name);
+ Handle<String> name,
+ Code::StubType type);
#define DECLARE_CALL_GENERATOR(name) \
Handle<Code> Compile##name##Call(Handle<Object> object, \
Handle<JSObject> holder, \
- Handle<JSGlobalPropertyCell> cell, \
+ Handle<Cell> cell, \
Handle<JSFunction> function, \
- Handle<String> fname);
+ Handle<String> fname, \
+ Code::StubType type);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name);
@@ -1053,7 +1061,7 @@ class CallStubCompiler: public StubCompiler {
// Generates code to load the function from the cell checking that
// it still contains the same function.
- void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+ void GenerateLoadFunctionFromCell(Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss);
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index 099f5d1879..ede567a485 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -93,6 +93,7 @@ void SweeperThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
start_sweeping_semaphore_->Signal();
stop_semaphore_->Wait();
+ Join();
}
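
The sweeper fix is the standard stop-then-join shutdown: signalling the worker and waiting on a semaphore proves it observed the stop flag, but only Join() releases the OS thread and its stack. The same shape with standard C++ primitives (v8 used its own Thread and Semaphore classes here):

#include <atomic>
#include <thread>

struct WorkerSketch {
  std::atomic<bool> stop_{false};
  std::thread thread_;

  void Start() {
    thread_ = std::thread([this] {
      while (!stop_.load(std::memory_order_relaxed)) {
        // ... sweep one chunk ...
      }
    });
  }

  void Stop() {
    stop_.store(true);
    thread_.join();  // the missing step: without it the thread handle leaks
  }
};

int main() {
  WorkerSketch w;
  w.Start();
  w.Stop();
  return 0;
}
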
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
index cabd37ac7f..6c3de3e011 100644
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -26,6 +26,9 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
+ 'variables': {
+ 'v8_code': 1,
+ },
'includes': ['../../../build/common.gypi'],
'targets': [
{
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index d3f7a68f43..0f35290d19 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -192,8 +192,7 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
if (*script != NULL) {
// Get the source file name and set it to jmethod.source_file_name
if ((*script->GetScriptName())->IsString()) {
- Handle<String> script_name =
- Handle<String>(String::Cast(*script->GetScriptName()));
+ Handle<String> script_name = script->GetScriptName()->ToString();
temp_file_name = new char[script_name->Utf8Length() + 1];
script_name->WriteUtf8(temp_file_name);
jmethod.source_file_name = temp_file_name;
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 5113c550ec..83eb9c45b4 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -42,20 +42,17 @@ namespace v8 {
namespace internal {
-TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
- TypeInfo info;
+TypeInfo TypeInfo::FromValue(Handle<Object> value) {
if (value->IsSmi()) {
- info = TypeInfo::Smi();
+ return TypeInfo::Smi();
} else if (value->IsHeapNumber()) {
- info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
+ return TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
? TypeInfo::Integer32()
: TypeInfo::Double();
} else if (value->IsString()) {
- info = TypeInfo::String();
- } else {
- info = TypeInfo::Unknown();
+ return TypeInfo::String();
}
- return info;
+ return TypeInfo::Unknown();
}
@@ -80,8 +77,8 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
if (entry != UnseededNumberDictionary::kNotFound) {
Object* value = dictionary_->ValueAt(entry);
- if (value->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(value);
+ if (value->IsCell()) {
+ Cell* cell = Cell::cast(value);
return Handle<Object>(cell->value(), isolate_);
} else {
return Handle<Object>(value, isolate_);
@@ -91,15 +88,14 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
}
-Handle<JSGlobalPropertyCell> TypeFeedbackOracle::GetInfoCell(
+Handle<Cell> TypeFeedbackOracle::GetInfoCell(
TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
if (entry != UnseededNumberDictionary::kNotFound) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
- dictionary_->ValueAt(entry));
- return Handle<JSGlobalPropertyCell>(cell, isolate_);
+ Cell* cell = Cell::cast(dictionary_->ValueAt(entry));
+ return Handle<Cell>(cell, isolate_);
}
- return Handle<JSGlobalPropertyCell>::null();
+ return Handle<Cell>::null();
}
@@ -142,6 +138,15 @@ bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
}
+bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
+ if (map_or_code->IsMap()) return false;
+ if (!map_or_code->IsCode()) return true;
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->ic_state() == UNINITIALIZED;
+}
+
+
bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsMap()) return true;
@@ -235,24 +240,6 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
}
-Handle<Map> TypeFeedbackOracle::CompareNilMonomorphicReceiverType(
- CompareOperation* expr) {
- Handle<Object> maybe_code = GetInfo(expr->CompareOperationFeedbackId());
- if (maybe_code->IsCode()) {
- Map* map = Handle<Code>::cast(maybe_code)->FindFirstMap();
- if (map == NULL) return Handle<Map>();
- map = map->CurrentMapForDeprecated();
- return map == NULL || CanRetainOtherContext(map, *native_context_)
- ? Handle<Map>()
- : Handle<Map>(map);
- } else if (maybe_code->IsMap()) {
- ASSERT(!Handle<Map>::cast(maybe_code)->is_deprecated());
- return Handle<Map>::cast(maybe_code);
- }
- return Handle<Map>();
-}
-
-
KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
@@ -269,7 +256,9 @@ KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
}
@@ -335,8 +324,7 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
}
-Handle<JSGlobalPropertyCell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(
- CallNew* expr) {
+Handle<Cell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(CallNew* expr) {
return GetInfoCell(expr->CallNewFeedbackId());
}
@@ -364,148 +352,84 @@ bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
}
-static TypeInfo TypeFromCompareType(CompareIC::State state) {
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case CompareIC::SMI:
- return TypeInfo::Smi();
- case CompareIC::NUMBER:
- return TypeInfo::Number();
- case CompareIC::INTERNALIZED_STRING:
- return TypeInfo::InternalizedString();
- case CompareIC::STRING:
- return TypeInfo::String();
- case CompareIC::OBJECT:
- case CompareIC::KNOWN_OBJECT:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return TypeInfo::Unknown();
- }
-}
-
-
-void TypeFeedbackOracle::CompareType(CompareOperation* expr,
- TypeInfo* left_type,
- TypeInfo* right_type,
- TypeInfo* overall_type) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) {
- *left_type = *right_type = *overall_type = unknown;
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* combined_type) {
+ Handle<Object> info = GetInfo(id);
+ if (!info->IsCode()) {
+ // For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
+ *left_type = *right_type = *combined_type = handle(Type::None(), isolate_);
return;
}
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) {
- *left_type = *right_type = *overall_type = unknown;
- return;
- }
-
- int stub_minor_key = code->stub_info();
- CompareIC::State left_state, right_state, handler_state;
- ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
- &handler_state, NULL);
- *left_type = TypeFromCompareType(left_state);
- *right_type = TypeFromCompareType(right_state);
- *overall_type = TypeFromCompareType(handler_state);
-}
-
+ Handle<Code> code = Handle<Code>::cast(info);
-Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (!object->IsCode()) return Handle<Map>::null();
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- if (state != CompareIC::KNOWN_OBJECT) {
- return Handle<Map>::null();
+ Handle<Map> map;
+ Map* raw_map = code->FindFirstMap();
+ if (raw_map != NULL) {
+ raw_map = raw_map->CurrentMapForDeprecated();
+ if (raw_map != NULL && !CanRetainOtherContext(raw_map, *native_context_)) {
+ map = handle(raw_map, isolate_);
+ }
}
- Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
- return map == NULL || CanRetainOtherContext(map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(map);
-}
-
-TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->UnaryOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- ASSERT(code->is_unary_op_stub());
- UnaryOpIC::TypeInfo type = static_cast<UnaryOpIC::TypeInfo>(
- code->unary_op_type());
- switch (type) {
- case UnaryOpIC::SMI:
- return TypeInfo::Smi();
- case UnaryOpIC::NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
+ if (code->is_compare_ic_stub()) {
+ int stub_minor_key = code->stub_info();
+ CompareIC::StubInfoToType(
+ stub_minor_key, left_type, right_type, combined_type, map, isolate());
+ } else if (code->is_compare_nil_ic_stub()) {
+ CompareNilICStub::State state(code->compare_nil_state());
+ *combined_type = CompareNilICStub::StateToType(isolate_, state, map);
+ Handle<Type> nil_type = handle(code->compare_nil_value() == kNullValue
+ ? Type::Null() : Type::Undefined(), isolate_);
+ *left_type = *right_type =
+ handle(Type::Union(*combined_type, nil_type), isolate_);
}
}
-static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
- switch (binary_type) {
- // Uninitialized means never executed.
- case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI: return TypeInfo::Smi();
- case BinaryOpIC::INT32: return TypeInfo::Integer32();
- case BinaryOpIC::NUMBER: return TypeInfo::Double();
- case BinaryOpIC::ODDBALL: return TypeInfo::Unknown();
- case BinaryOpIC::STRING: return TypeInfo::String();
- case BinaryOpIC::GENERIC: return TypeInfo::Unknown();
+Handle<Type> TypeFeedbackOracle::UnaryType(TypeFeedbackId id) {
+ Handle<Object> object = GetInfo(id);
+ if (!object->IsCode()) {
+ return handle(Type::None(), isolate());
}
- UNREACHABLE();
- return TypeInfo::Unknown();
+ Handle<Code> code = Handle<Code>::cast(object);
+ ASSERT(code->is_unary_op_stub());
+ return UnaryOpIC::TypeInfoToType(
+ static_cast<UnaryOpIC::TypeInfo>(code->unary_op_type()), isolate());
}
-void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
- TypeInfo* left,
- TypeInfo* right,
- TypeInfo* result,
- bool* has_fixed_right_arg,
- int* fixed_right_arg_value) {
- Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
+void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Maybe<int>* fixed_right_arg) {
+ Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
- *left = *right = *result = unknown;
+ // For some binary ops we don't have ICs, e.g. Token::COMMA.
+ *left = *right = *result = handle(Type::None(), isolate_);
return;
}
Handle<Code> code = Handle<Code>::cast(object);
- if (code->is_binary_op_stub()) {
- int minor_key = code->stub_info();
- BinaryOpIC::TypeInfo left_type, right_type, result_type;
- BinaryOpStub::decode_types_from_minor_key(
- minor_key, &left_type, &right_type, &result_type);
- *left = TypeFromBinaryOpType(left_type);
- *right = TypeFromBinaryOpType(right_type);
- *result = TypeFromBinaryOpType(result_type);
- *has_fixed_right_arg =
- BinaryOpStub::decode_has_fixed_right_arg_from_minor_key(minor_key);
- *fixed_right_arg_value =
- BinaryOpStub::decode_fixed_right_arg_value_from_minor_key(minor_key);
- return;
- }
- // Not a binary op stub.
- *left = *right = *result = unknown;
+ ASSERT(code->is_binary_op_stub());
+
+ int minor_key = code->stub_info();
+ BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
+ *fixed_right_arg =
+ BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
}
-TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
- Handle<Object> object = GetInfo(clause->CompareId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- return TypeFromCompareType(state);
+Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
+ Handle<Type> result(Type::None(), isolate_);
+ if (info->IsCode() && Handle<Code>::cast(info)->is_compare_ic_stub()) {
+ Handle<Code> code = Handle<Code>::cast(info);
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ result = CompareIC::StateToType(isolate_, state);
+ }
+ return result;
}
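
BinaryType now reports the optional fixed right operand through a single Maybe<int> instead of a bool/int out-parameter pair. A minimal Maybe in the spirit of v8's struct of this era, paired with a purely invented minor-key decoder for illustration:

template <class T>
struct Maybe {
  Maybe() : has_value(false) {}
  explicit Maybe(T t) : has_value(true), value(t) {}
  bool has_value;
  T value;
};

// Invented stand-in for BinaryOpStub's decoding: bit 0 says whether a
// fixed right argument exists, higher bits carry its value.
Maybe<int> DecodeFixedRightArg(int minor_key) {
  if ((minor_key & 1) == 0) return Maybe<int>();
  return Maybe<int>((minor_key >> 1) & 0xff);
}

int main() {
  Maybe<int> none = DecodeFixedRightArg(0);
  Maybe<int> two = DecodeFixedRightArg((2 << 1) | 1);
  return (!none.has_value && two.has_value && two.value == 2) ? 0 : 1;
}
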
@@ -569,7 +493,8 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
} else if (object->IsMap()) {
types->AddMapIfMissing(Handle<Map>::cast(object), zone());
- } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC) {
+ } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC ||
+ Handle<Code>::cast(object)->ic_state() == MONOMORPHIC) {
CollectPolymorphicMaps(Handle<Code>::cast(object), types);
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
@@ -636,17 +561,6 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
}
-byte TypeFeedbackOracle::CompareNilTypes(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (object->IsCode() &&
- Handle<Code>::cast(object)->is_compare_nil_ic_stub()) {
- return Handle<Code>::cast(object)->compare_nil_types();
- } else {
- return CompareNilICStub::Types::FullCompare().ToIntegral();
- }
-}
-
-
// Things are a bit tricky here: The iterator for the RelocInfos and the infos
// themselves are not GC-safe, so we first get all infos, then we create the
// dictionary (possibly triggering GC), and finally we relocate the collected
@@ -759,7 +673,7 @@ void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
for (int i = 0; i < cache->CellCount(); i++) {
TypeFeedbackId ast_id = cache->AstId(i);
- JSGlobalPropertyCell* cell = cache->Cell(i);
+ Cell* cell = cache->GetCell(i);
Object* value = cell->value();
if (value->IsSmi() ||
(value->IsJSFunction() &&
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 53a83be659..a1c1f54cc1 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "globals.h"
+#include "types.h"
#include "zone-inl.h"
namespace v8 {
@@ -113,7 +114,7 @@ class TypeInfo {
return false;
}
- static TypeInfo TypeFromValue(Handle<Object> value);
+ static TypeInfo FromValue(Handle<Object> value);
bool Equals(const TypeInfo& other) {
return type_ == other.type_;
@@ -217,12 +218,11 @@ enum StringStubFeedback {
// Forward declarations.
+// TODO(rossberg): these should all go away eventually.
class Assignment;
-class BinaryOperation;
class Call;
class CallNew;
class CaseClause;
-class CompareOperation;
class CompilationInfo;
class CountOperation;
class Expression;
@@ -230,7 +230,6 @@ class ForInStatement;
class ICStub;
class Property;
class SmallMapList;
-class UnaryOperation;
class ObjectLiteral;
class ObjectLiteralProperty;
@@ -245,6 +244,7 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
bool LoadIsPolymorphic(Property* expr);
+ bool StoreIsUninitialized(TypeFeedbackId ast_id);
bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
bool StoreIsPolymorphic(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
@@ -257,7 +257,6 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId id);
- Handle<Map> CompareNilMonomorphicReceiverType(CompareOperation* expr);
KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id);
@@ -283,7 +282,7 @@ class TypeFeedbackOracle: public ZoneObject {
CheckType GetCallCheckType(Call* expr);
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
- Handle<JSGlobalPropertyCell> GetCallNewAllocationInfoCell(CallNew* expr);
+ Handle<Cell> GetCallNewAllocationInfoCell(CallNew* expr);
Handle<Map> GetObjectLiteralStoreMap(ObjectLiteralProperty* prop);
@@ -293,27 +292,23 @@ class TypeFeedbackOracle: public ZoneObject {
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
- byte ToBooleanTypes(TypeFeedbackId ast_id);
-
- // TODO(1571) We can't use CompareNilICStub::Types as the return value because
- // of various cylces in our headers. Death to tons of implementations in
- // headers!! :-P
- byte CompareNilTypes(CompareOperation* expr);
+ byte ToBooleanTypes(TypeFeedbackId id);
// Get type information for arithmetic operations and compares.
- TypeInfo UnaryType(UnaryOperation* expr);
- void BinaryType(BinaryOperation* expr,
- TypeInfo* left,
- TypeInfo* right,
- TypeInfo* result,
- bool* has_fixed_right_arg,
- int* fixed_right_arg_value);
- void CompareType(CompareOperation* expr,
- TypeInfo* left_type,
- TypeInfo* right_type,
- TypeInfo* overall_type);
- Handle<Map> GetCompareMap(CompareOperation* expr);
- TypeInfo SwitchType(CaseClause* clause);
+ Handle<Type> UnaryType(TypeFeedbackId id);
+ void BinaryType(TypeFeedbackId id,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Maybe<int>* fixed_right_arg);
+
+ void CompareType(TypeFeedbackId id,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* combined_type);
+
+ Handle<Type> ClauseType(TypeFeedbackId id);
+
TypeInfo IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
@@ -341,7 +336,7 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Object> GetInfo(TypeFeedbackId ast_id);
// Return the cell that contains type feedback.
- Handle<JSGlobalPropertyCell> GetInfoCell(TypeFeedbackId ast_id);
+ Handle<Cell> GetInfoCell(TypeFeedbackId ast_id);
private:
Handle<Context> native_context_;
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 04c487f43c..0d90355049 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -37,7 +37,7 @@
function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- var offset = IS_UNDEFINED(byteOffset) ? 0 : TO_POSITIVE_INTEGER(byteOffset);
+ var offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
if (offset % elementSize !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
@@ -58,7 +58,7 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
newByteLength = bufferByteLength - offset;
newLength = newByteLength / elementSize;
} else {
- var newLength = TO_POSITIVE_INTEGER(length);
+ var newLength = ToPositiveInteger(length, "invalid_typed_array_length");
newByteLength = newLength * elementSize;
}
if (offset + newByteLength > bufferByteLength) {
@@ -68,7 +68,7 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
}
function ConstructByLength(obj, length) {
- var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
var byteLength = l * elementSize;
var buffer = new global.ArrayBuffer(byteLength);
%TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
@@ -76,7 +76,7 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
function ConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
- var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
var byteLength = l * elementSize;
var buffer = new $ArrayBuffer(byteLength);
%TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
@@ -97,7 +97,7 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
throw MakeTypeError("parameterless_typed_array_constr", [name]);
}
} else {
- return new constructor(arg1, arg2, arg3);
+ throw MakeTypeError("constructor_not_function", [name]);
}
}
}
@@ -146,7 +146,10 @@ function CreateSubArray(elementSize, constructor) {
}
function TypedArraySet(obj, offset) {
- var intOffset = IS_UNDEFINED(offset) ? 0 : TO_POSITIVE_INTEGER(offset);
+ var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
+ if (intOffset < 0) {
+ throw MakeTypeError("typed_array_set_negative_offset");
+ }
if (%TypedArraySetFastCases(this, obj, intOffset))
return;
@@ -197,3 +200,276 @@ SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
+
+
+// --------------------------- DataView -----------------------------
+
+var $DataView = global.DataView;
+
+function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
+ if (%_IsConstructCall()) {
+ if (!IS_ARRAYBUFFER(buffer)) {
+ throw MakeTypeError('data_view_not_array_buffer', []);
+ }
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+ var offset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ if (offset > bufferByteLength) {
+ throw MakeRangeError('invalid_data_view_offset');
+ }
+ var length = IS_UNDEFINED(byteLength) ?
+ bufferByteLength - offset : TO_INTEGER(byteLength);
+ if (length < 0 || offset + length > bufferByteLength) {
+ throw new MakeRangeError('invalid_data_view_length');
+ }
+ %DataViewInitialize(this, buffer, offset, length);
+ } else {
+ throw MakeTypeError('constructor_not_function', ["DataView"]);
+ }
+}
+
+function DataViewGetBuffer() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.buffer', this]);
+ }
+ return %DataViewGetBuffer(this);
+}
+
+function DataViewGetByteOffset() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.byteOffset', this]);
+ }
+ return %DataViewGetByteOffset(this);
+}
+
+function DataViewGetByteLength() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.byteLength', this]);
+ }
+ return %DataViewGetByteLength(this);
+}
+
+function ToPositiveDataViewOffset(offset) {
+ return ToPositiveInteger(offset, 'invalid_data_view_accessor_offset');
+}
+
+function DataViewGetInt8(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getInt8', this]);
+ }
+ return %DataViewGetInt8(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetInt8(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setInt8', this]);
+ }
+ %DataViewSetInt8(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetUint8(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getUint8', this]);
+ }
+ return %DataViewGetUint8(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetUint8(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setUint8', this]);
+ }
+ %DataViewSetUint8(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetInt16(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getInt16', this]);
+ }
+ return %DataViewGetInt16(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetInt16(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setInt16', this]);
+ }
+ %DataViewSetInt16(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetUint16(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getUint16', this]);
+ }
+ return %DataViewGetUint16(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetUint16(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setUint16', this]);
+ }
+ %DataViewSetUint16(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetInt32(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getInt32', this]);
+ }
+ return %DataViewGetInt32(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetInt32(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setInt32', this]);
+ }
+ %DataViewSetInt32(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetUint32(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getUint32', this]);
+ }
+ return %DataViewGetUint32(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetUint32(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setUint32', this]);
+ }
+ %DataViewSetUint32(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetFloat32(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getFloat32', this]);
+ }
+ return %DataViewGetFloat32(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetFloat32(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setFloat32', this]);
+ }
+ %DataViewSetFloat32(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetFloat64(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.getFloat64', this]);
+ }
+ offset = TO_INTEGER(offset);
+ if (offset < 0) {
+ throw MakeRangeError("invalid_data_view_accessor_offset");
+ }
+ return %DataViewGetFloat64(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetFloat64(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_reciever',
+ ['DataView.setFloat64', this]);
+ }
+ offset = TO_INTEGER(offset);
+ if (offset < 0) {
+ throw MakeRangeError("invalid_data_view_accessor_offset");
+ }
+ %DataViewSetFloat64(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function SetupDataView() {
+ %CheckIsBootstrapping();
+
+ // Setup the DataView constructor.
+ %SetCode($DataView, DataViewConstructor);
+ %FunctionSetPrototype($DataView, new $Object);
+
+ // Set up constructor property on the DataView prototype.
+ %SetProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
+
+ InstallGetter($DataView.prototype, "buffer", DataViewGetBuffer);
+ InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset);
+ InstallGetter($DataView.prototype, "byteLength", DataViewGetByteLength);
+
+ InstallFunctions($DataView.prototype, DONT_ENUM, $Array(
+ "getInt8", DataViewGetInt8,
+ "setInt8", DataViewSetInt8,
+
+ "getUint8", DataViewGetUint8,
+ "setUint8", DataViewSetUint8,
+
+ "getInt16", DataViewGetInt16,
+ "setInt16", DataViewSetInt16,
+
+ "getUint16", DataViewGetUint16,
+ "setUint16", DataViewSetUint16,
+
+ "getInt32", DataViewGetInt32,
+ "setInt32", DataViewSetInt32,
+
+ "getUint32", DataViewGetUint32,
+ "setUint32", DataViewSetUint32,
+
+ "getFloat32", DataViewGetFloat32,
+ "setFloat32", DataViewSetFloat32,
+
+ "getFloat64", DataViewGetFloat64,
+ "setFloat64", DataViewSetFloat64
+ ));
+}
+
+SetupDataView();
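
Each DataView accessor above validates the receiver, normalizes the offset, and delegates to a runtime function (%DataViewGetUint16 and friends) that performs an endian-aware load or store. The core of such a load for the 16-bit case, reduced to standard C++; note that the default when little_endian is omitted is big-endian:

#include <cassert>
#include <cstddef>
#include <cstdint>

uint16_t GetUint16(const uint8_t* buf, size_t offset, bool little_endian) {
  const uint16_t b0 = buf[offset];      // byte at the requested offset
  const uint16_t b1 = buf[offset + 1];  // next byte
  return little_endian ? static_cast<uint16_t>(b0 | (b1 << 8))
                       : static_cast<uint16_t>((b0 << 8) | b1);
}

int main() {
  const uint8_t bytes[] = {0x12, 0x34};
  assert(GetUint16(bytes, 0, true) == 0x3412);   // little-endian view
  assert(GetUint16(bytes, 0, false) == 0x1234);  // big-endian (the default)
  return 0;
}
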
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index f7fbd2d69b..1275deacb7 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -30,6 +30,84 @@
namespace v8 {
namespace internal {
+int Type::NumClasses() {
+ if (is_class()) {
+ return 1;
+ } else if (is_union()) {
+ Handle<Unioned> unioned = as_union();
+ int result = 0;
+ for (int i = 0; i < unioned->length(); ++i) {
+ if (union_get(unioned, i)->is_class()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+
+int Type::NumConstants() {
+ if (is_constant()) {
+ return 1;
+ } else if (is_union()) {
+ Handle<Unioned> unioned = as_union();
+ int result = 0;
+ for (int i = 0; i < unioned->length(); ++i) {
+ if (union_get(unioned, i)->is_constant()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+
+template<class T>
+Handle<Type> Type::Iterator<T>::get_type() {
+ ASSERT(!Done());
+ return type_->is_union() ? union_get(type_->as_union(), index_) : type_;
+}
+
+template<>
+Handle<Map> Type::Iterator<Map>::Current() {
+ return get_type()->as_class();
+}
+
+template<>
+Handle<v8::internal::Object> Type::Iterator<v8::internal::Object>::Current() {
+ return get_type()->as_constant();
+}
+
+
+template<>
+bool Type::Iterator<Map>::matches(Handle<Type> type) {
+ return type->is_class();
+}
+
+template<>
+bool Type::Iterator<v8::internal::Object>::matches(Handle<Type> type) {
+ return type->is_constant();
+}
+
+
+template<class T>
+void Type::Iterator<T>::Advance() {
+ ++index_;
+ if (type_->is_union()) {
+ Handle<Unioned> unioned = type_->as_union();
+ for (; index_ < unioned->length(); ++index_) {
+ if (matches(union_get(unioned, index_))) return;
+ }
+ } else if (index_ == 0 && matches(type_)) {
+ return;
+ }
+ index_ = -1;
+}
+
+template class Type::Iterator<Map>;
+template class Type::Iterator<v8::internal::Object>;
+
+
// Get the smallest bitset subsuming this type.
int Type::LubBitset() {
if (this->is_bitset()) {
@@ -46,9 +124,15 @@ int Type::LubBitset() {
if (this->is_class()) {
map = *this->as_class();
} else {
- v8::internal::Object* value = this->as_constant()->value();
+ Handle<v8::internal::Object> value = this->as_constant();
if (value->IsSmi()) return kSmi;
- map = HeapObject::cast(value)->map();
+ map = HeapObject::cast(*value)->map();
+ if (map->instance_type() == ODDBALL_TYPE) {
+ if (value->IsUndefined()) return kUndefined;
+ if (value->IsNull()) return kNull;
+ if (value->IsTrue() || value->IsFalse()) return kBoolean;
+ if (value->IsTheHole()) return kAny;
+ }
}
switch (map->instance_type()) {
case STRING_TYPE:
@@ -56,6 +140,7 @@ int Type::LubBitset() {
case CONS_STRING_TYPE:
case CONS_ASCII_STRING_TYPE:
case SLICED_STRING_TYPE:
+ case SLICED_ASCII_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
case EXTERNAL_ASCII_STRING_TYPE:
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
@@ -90,16 +175,36 @@ int Type::LubBitset() {
case JS_GLOBAL_PROXY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
- case JS_REGEXP_TYPE:
+ if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_ARRAY_TYPE:
return kArray;
case JS_FUNCTION_TYPE:
return kFunction;
+ case JS_REGEXP_TYPE:
+ return kRegExp;
case JS_PROXY_TYPE:
case JS_FUNCTION_PROXY_TYPE:
return kProxy;
+ case MAP_TYPE:
+      // When compiling stub templates, the meta map is used as a placeholder
+ // for the actual map with which the template is later instantiated.
+ // We treat it as a kind of type variable whose upper bound is Any.
+ // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
+ // we must exclude Undetectable here. This makes no sense, really,
+ // because it means that the template isn't actually parametric.
+ // Also, it doesn't apply elsewhere. 8-(
+ // We ought to find a cleaner solution for compiling stubs parameterised
+      // over type or class variables, especially ones with bounds...
+ return kDetectable;
+ case DECLARED_ACCESSOR_INFO_TYPE:
+ case EXECUTABLE_ACCESSOR_INFO_TYPE:
+ case ACCESSOR_PAIR_TYPE:
+ return kInternal;
default:
UNREACHABLE();
return kNone;
@@ -122,7 +227,7 @@ int Type::GlbBitset() {
// Check this <= that.
-bool Type::Is(Handle<Type> that) {
+bool Type::IsSlowCase(Type* that) {
// Fast path for bitsets.
if (that->is_bitset()) {
return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
@@ -132,8 +237,7 @@ bool Type::Is(Handle<Type> that) {
return this->is_class() && *this->as_class() == *that->as_class();
}
if (that->is_constant()) {
- return this->is_constant() &&
- this->as_constant()->value() == that->as_constant()->value();
+ return this->is_constant() && *this->as_constant() == *that->as_constant();
}
// (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
@@ -148,6 +252,7 @@ bool Type::Is(Handle<Type> that) {
// T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn)
// (iff T is not a union)
+ ASSERT(!this->is_union());
if (that->is_union()) {
Handle<Unioned> unioned = that->as_union();
for (int i = 0; i < unioned->length(); ++i) {
@@ -163,7 +268,7 @@ bool Type::Is(Handle<Type> that) {
// Check this overlaps that.
-bool Type::Maybe(Handle<Type> that) {
+bool Type::Maybe(Type* that) {
// Fast path for bitsets.
if (this->is_bitset()) {
return (this->as_bitset() & that->LubBitset()) != 0;
@@ -172,14 +277,6 @@ bool Type::Maybe(Handle<Type> that) {
return (this->LubBitset() & that->as_bitset()) != 0;
}
- if (this->is_class()) {
- return that->is_class() && *this->as_class() == *that->as_class();
- }
- if (this->is_constant()) {
- return that->is_constant() &&
- this->as_constant()->value() == that->as_constant()->value();
- }
-
// (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
if (this->is_union()) {
Handle<Unioned> unioned = this->as_union();
@@ -200,6 +297,14 @@ bool Type::Maybe(Handle<Type> that) {
return false;
}
+ ASSERT(!that->is_union());
+ if (this->is_class()) {
+ return that->is_class() && *this->as_class() == *that->as_class();
+ }
+ if (this->is_constant()) {
+ return that->is_constant() && *this->as_constant() == *that->as_constant();
+ }
+
return false;
}
@@ -208,12 +313,12 @@ bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
ASSERT(!this->is_union());
for (int i = 0; i < current_size; ++i) {
Handle<Type> type = union_get(unioned, i);
- if (type->is_bitset() ? this->Is(type) : this == *type) return true;
+ if (this->Is(type)) return true;
}
return false;
}
-// Get non-bitsets from this which are not subsumed by that, store at unioned,
+// Get non-bitsets from this which are not subsumed by the union, store at unioned,
// starting at index. Returns updated index.
int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
int old_size = current_size;
@@ -240,6 +345,12 @@ Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
return from_bitset(type1->as_bitset() | type2->as_bitset());
}
+ // Fast case: top or bottom types.
+ if (type1->SameValue(Type::Any())) return *type1;
+ if (type2->SameValue(Type::Any())) return *type2;
+ if (type1->SameValue(Type::None())) return *type2;
+ if (type2->SameValue(Type::None())) return *type1;
+
// Semi-fast case: Unioned objects are neither involved nor produced.
if (!(type1->is_union() || type2->is_union())) {
if (type1->Is(type2)) return *type2;
@@ -280,6 +391,85 @@ Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
}
+// Get non-bitsets from this which are also in that, store at unioned,
+// starting at index. Returns updated index.
+int Type::ExtendIntersection(
+ Handle<Unioned> result, Handle<Type> that, int current_size) {
+ int old_size = current_size;
+ if (this->is_class() || this->is_constant()) {
+ if (this->Is(that) && !this->InUnion(result, old_size))
+ result->set(current_size++, this);
+ } else if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> type = union_get(unioned, i);
+ ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
+ if (type->is_bitset()) continue;
+ if (type->Is(that) && !type->InUnion(result, old_size))
+ result->set(current_size++, *type);
+ }
+ }
+ return current_size;
+}
+
+
+// Intersection is O(1) on simple bit unions, but O(n*m) on structured unions.
+// TODO(rossberg): Should we use object sets somehow? Is it worth it?
+Type* Type::Intersect(Handle<Type> type1, Handle<Type> type2) {
+ // Fast case: bit sets.
+ if (type1->is_bitset() && type2->is_bitset()) {
+ return from_bitset(type1->as_bitset() & type2->as_bitset());
+ }
+
+ // Fast case: top or bottom types.
+ if (type1->SameValue(Type::None())) return *type1;
+ if (type2->SameValue(Type::None())) return *type2;
+ if (type1->SameValue(Type::Any())) return *type2;
+ if (type2->SameValue(Type::Any())) return *type1;
+
+ // Semi-fast case: Unioned objects are neither involved nor produced.
+ if (!(type1->is_union() || type2->is_union())) {
+ if (type1->Is(type2)) return *type1;
+ if (type2->Is(type1)) return *type2;
+ }
+
+ // Slow case: may need to produce a Unioned object.
+ Isolate* isolate = NULL;
+ int size = 0;
+ if (!type1->is_bitset()) {
+ isolate = HeapObject::cast(*type1)->GetIsolate();
+ size = (type1->is_union() ? type1->as_union()->length() : 2);
+ }
+ if (!type2->is_bitset()) {
+ isolate = HeapObject::cast(*type2)->GetIsolate();
+ int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
+ size = (size == 0 ? size2 : Min(size, size2));
+ }
+ ASSERT(isolate != NULL);
+ ASSERT(size >= 2);
+ Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+ size = 0;
+
+ int bitset = type1->GlbBitset() & type2->GlbBitset();
+ if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
+ size = type1->ExtendIntersection(unioned, type2, size);
+ size = type2->ExtendIntersection(unioned, type1, size);
+
+ if (size == 0) {
+ return None();
+ } else if (size == 1) {
+ return *union_get(unioned, 0);
+ } else if (size == unioned->length()) {
+ return from_handle(unioned);
+ }
+
+ // There were dropped cases. Copy to smaller union.
+ Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
+ for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
+ return from_handle(result);
+}
+
+
Type* Type::Optional(Handle<Type> type) {
return type->is_bitset()
? from_bitset(type->as_bitset() | kUndefined)
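Union and Intersect above both short-circuit to plain bit arithmetic when the
operands are bitsets, which keeps the common case allocation-free. A small
self-contained sketch of that fast path, using stand-in constants that mirror the
enum in types.h below (values copied for illustration only):

    #include <cassert>

    enum TypeBits {  // stand-ins for the private bitset encoding
      kSmi = 1 << 3, kOtherSigned32 = 1 << 4, kUnsigned32 = 1 << 5,
      kDouble = 1 << 6,
      kSigned32 = kSmi | kOtherSigned32,
      kNumber = kSigned32 | kUnsigned32 | kDouble
    };

    int main() {
      int u = kSigned32 | kDouble;       // Union of bitsets is bitwise OR.
      int i = u & kNumber;               // Intersect is bitwise AND.
      assert(i == u);                    // Signed32 \/ Double <= Number, so
      assert((u | kNumber) == kNumber);  // the Is() fast path also holds.
      return 0;
    }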
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 6db9bfbb6a..a2bcda6579 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -48,14 +48,19 @@ namespace internal {
// T <= Any
//
// Oddball = Boolean \/ Null \/ Undefined
-// Number = Smi \/ Double
+// Number = Signed32 \/ Unsigned32 \/ Double
+// Smi <= Signed32
// Name = String \/ Symbol
// UniqueName = InternalizedString \/ Symbol
// InternalizedString < String
//
+// Allocated = Receiver \/ Number \/ Name
+// Detectable = Allocated - Undetectable
+// Undetectable < Object
// Receiver = Object \/ Proxy
// Array < Object
// Function < Object
+// RegExp < Object
//
// Class(map) < T iff instance_type(map) < T
// Constant(x) < T iff instance_type(map(x)) < T
@@ -70,17 +75,21 @@ namespace internal {
// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
//
-// Typically, the latter should be used to check whether a specific case needs
-// handling (e.g., via T->Maybe(Number)).
+// Typically, the former should be used to select representations (e.g., via
+// T->Is(Integer31())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
//
// There is no functionality to discover whether a type is a leaf in the
// lattice. That is intentional. It should always be possible to refine the
// lattice (e.g., splitting up number types further) without invalidating any
// existing assumptions or tests.
//
+// Consequently, do not use pointer equality for type tests, always use Is!
+//
// Internally, all 'primitive' types, and their unions, are represented as
// bitsets via smis. Class is a heap pointer to the respective map. Only
// Constant's, or unions containing Class'es or Constant's, require allocation.
+// Note that the bitset representation is closed under both Union and Intersect.
//
// The type representation is heap-allocated, so cannot (currently) be used in
// a parallel compilation context.
@@ -89,6 +98,8 @@ class Type : public Object {
public:
static Type* None() { return from_bitset(kNone); }
static Type* Any() { return from_bitset(kAny); }
+ static Type* Allocated() { return from_bitset(kAllocated); }
+ static Type* Detectable() { return from_bitset(kDetectable); }
static Type* Oddball() { return from_bitset(kOddball); }
static Type* Boolean() { return from_bitset(kBoolean); }
@@ -97,7 +108,10 @@ class Type : public Object {
static Type* Number() { return from_bitset(kNumber); }
static Type* Smi() { return from_bitset(kSmi); }
+ static Type* Signed32() { return from_bitset(kSigned32); }
+ static Type* Unsigned32() { return from_bitset(kUnsigned32); }
static Type* Double() { return from_bitset(kDouble); }
+ static Type* NumberOrString() { return from_bitset(kNumberOrString); }
static Type* Name() { return from_bitset(kName); }
static Type* UniqueName() { return from_bitset(kUniqueName); }
@@ -107,9 +121,12 @@ class Type : public Object {
static Type* Receiver() { return from_bitset(kReceiver); }
static Type* Object() { return from_bitset(kObject); }
+ static Type* Undetectable() { return from_bitset(kUndetectable); }
static Type* Array() { return from_bitset(kArray); }
static Type* Function() { return from_bitset(kFunction); }
+ static Type* RegExp() { return from_bitset(kRegExp); }
static Type* Proxy() { return from_bitset(kProxy); }
+ static Type* Internal() { return from_bitset(kInternal); }
static Type* Class(Handle<Map> map) { return from_handle(map); }
static Type* Constant(Handle<HeapObject> value) {
@@ -120,12 +137,52 @@ class Type : public Object {
}
static Type* Union(Handle<Type> type1, Handle<Type> type2);
+ static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
static Type* Optional(Handle<Type> type); // type \/ Undefined
- bool Is(Handle<Type> that);
- bool Maybe(Handle<Type> that);
+ bool Is(Type* that) { return (this == that) ? true : IsSlowCase(that); }
+ bool Is(Handle<Type> that) { return this->Is(*that); }
+ bool Maybe(Type* that);
+ bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
+
+ bool IsClass() { return is_class(); }
+ bool IsConstant() { return is_constant(); }
+ Handle<Map> AsClass() { return as_class(); }
+ Handle<v8::internal::Object> AsConstant() { return as_constant(); }
+
+ int NumClasses();
+ int NumConstants();
+
+ template<class T>
+ class Iterator {
+ public:
+ bool Done() const { return index_ < 0; }
+ Handle<T> Current();
+ void Advance();
- // TODO(rossberg): method to iterate unions?
+ private:
+ friend class Type;
+
+ Iterator() : index_(-1) {}
+ explicit Iterator(Handle<Type> type) : type_(type), index_(-1) {
+ Advance();
+ }
+
+ inline bool matches(Handle<Type> type);
+ inline Handle<Type> get_type();
+
+ Handle<Type> type_;
+ int index_;
+ };
+
+ Iterator<Map> Classes() {
+ if (this->is_bitset()) return Iterator<Map>();
+ return Iterator<Map>(this->handle());
+ }
+ Iterator<v8::internal::Object> Constants() {
+ if (this->is_bitset()) return Iterator<v8::internal::Object>();
+ return Iterator<v8::internal::Object>(this->handle());
+ }
private:
// A union is a fixed array containing types. Invariants:
@@ -139,23 +196,32 @@ class Type : public Object {
kUndefined = 1 << 1,
kBoolean = 1 << 2,
kSmi = 1 << 3,
- kDouble = 1 << 4,
- kSymbol = 1 << 5,
- kInternalizedString = 1 << 6,
- kOtherString = 1 << 7,
- kArray = 1 << 8,
- kFunction = 1 << 9,
- kOtherObject = 1 << 10,
- kProxy = 1 << 11,
+ kOtherSigned32 = 1 << 4,
+ kUnsigned32 = 1 << 5,
+ kDouble = 1 << 6,
+ kSymbol = 1 << 7,
+ kInternalizedString = 1 << 8,
+ kOtherString = 1 << 9,
+ kUndetectable = 1 << 10,
+ kArray = 1 << 11,
+ kFunction = 1 << 12,
+ kRegExp = 1 << 13,
+ kOtherObject = 1 << 14,
+ kProxy = 1 << 15,
+ kInternal = 1 << 16,
kOddball = kBoolean | kNull | kUndefined,
- kNumber = kSmi | kDouble,
+ kSigned32 = kSmi | kOtherSigned32,
+ kNumber = kSigned32 | kUnsigned32 | kDouble,
kString = kInternalizedString | kOtherString,
kUniqueName = kSymbol | kInternalizedString,
kName = kSymbol | kString,
- kObject = kArray | kFunction | kOtherObject,
+ kNumberOrString = kNumber | kString,
+ kObject = kUndetectable | kArray | kFunction | kRegExp | kOtherObject,
kReceiver = kObject | kProxy,
- kAny = kOddball | kNumber | kName | kReceiver,
+ kAllocated = kDouble | kName | kReceiver,
+ kAny = kOddball | kNumber | kAllocated | kInternal,
+ kDetectable = kAllocated - kUndetectable,
kNone = 0
};
@@ -164,9 +230,14 @@ class Type : public Object {
bool is_constant() { return this->IsBox(); }
bool is_union() { return this->IsFixedArray(); }
+ bool IsSlowCase(Type* that);
+
int as_bitset() { return Smi::cast(this)->value(); }
Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
- Handle<Box> as_constant() { return Handle<Box>::cast(handle()); }
+ Handle<v8::internal::Object> as_constant() {
+ Handle<Box> box = Handle<Box>::cast(handle());
+ return v8::internal::handle(box->value(), box->GetIsolate());
+ }
Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
Handle<Type> handle() { return handle_via_isolate_of(this); }
@@ -192,6 +263,8 @@ class Type : public Object {
int GlbBitset(); // greatest lower bound that's a bitset
bool InUnion(Handle<Unioned> unioned, int current_size);
int ExtendUnion(Handle<Unioned> unioned, int current_size);
+ int ExtendIntersection(
+ Handle<Unioned> unioned, Handle<Type> type, int current_size);
};
} } // namespace v8::internal
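The Iterator<T> added above resolves the old TODO about iterating unions:
Classes() visits only the Class members of a type and Constants() only the
Constant members, whether or not the type is a union. A sketch of the intended
call pattern, following the signatures declared in this header (the collecting
List parameter is illustrative):

    // Sketch only: gather every map named by a type (possibly a union).
    void CollectMaps(Handle<Type> type, List<Handle<Map> >* out) {
      for (Type::Iterator<Map> it = type->Classes(); !it.Done(); it.Advance()) {
        out->Add(it.Current());  // Current() yields the map of one Class member.
      }
    }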
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 4ba67213a1..7c116120a2 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -52,7 +52,7 @@ AstTyper::AstTyper(CompilationInfo* info)
} while (false)
-void AstTyper::Type(CompilationInfo* info) {
+void AstTyper::Run(CompilationInfo* info) {
AstTyper* visitor = new(info->zone()) AstTyper(info);
Scope* scope = info->scope();
@@ -295,7 +295,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
!CompileTimeValue::IsCompileTimeValue(prop->value())) ||
prop->kind() == ObjectLiteral::Property::COMPUTED) {
- if (prop->key()->handle()->IsInternalizedString() && prop->emit_store())
+ if (prop->key()->value()->IsInternalizedString() && prop->emit_store())
prop->RecordTypeFeedback(oracle());
}
}
@@ -404,7 +404,9 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!HasStackOverflow());
CHECK_ALIVE(Visit(expr->expression()));
- expr->RecordTypeFeedback(oracle());
+ // Collect type feedback.
+ Handle<Type> op_type = oracle()->UnaryType(expr->UnaryOperationFeedbackId());
+ MergeLowerType(expr->expression(), op_type);
if (expr->op() == Token::NOT) {
// TODO(rossberg): only do in test or value context.
expr->expression()->RecordToBooleanTypeFeedback(oracle());
@@ -429,7 +431,15 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
CHECK_ALIVE(Visit(expr->left()));
CHECK_ALIVE(Visit(expr->right()));
- expr->RecordTypeFeedback(oracle());
+ // Collect type feedback.
+ Handle<Type> left_type, right_type, result_type;
+ Maybe<int> fixed_right_arg;
+ oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
+ &left_type, &right_type, &result_type, &fixed_right_arg);
+ MergeLowerType(expr->left(), left_type);
+ MergeLowerType(expr->right(), right_type);
+ expr->set_result_type(result_type);
+ expr->set_fixed_right_arg(fixed_right_arg);
if (expr->op() == Token::OR || expr->op() == Token::AND) {
expr->left()->RecordToBooleanTypeFeedback(oracle());
}
@@ -441,7 +451,13 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(Visit(expr->left()));
CHECK_ALIVE(Visit(expr->right()));
- expr->RecordTypeFeedback(oracle());
+ // Collect type feedback.
+ Handle<Type> left_type, right_type, combined_type;
+ oracle()->CompareType(expr->CompareOperationFeedbackId(),
+ &left_type, &right_type, &combined_type);
+ MergeLowerType(expr->left(), left_type);
+ MergeLowerType(expr->right(), right_type);
+ expr->set_combined_type(combined_type);
}
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
index d8708c2ccb..2d3fac0650 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/typing.h
@@ -43,7 +43,7 @@ namespace internal {
class AstTyper: public AstVisitor {
public:
- static void Type(CompilationInfo* info);
+ static void Run(CompilationInfo* info);
void* operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
@@ -62,6 +62,13 @@ class AstTyper: public AstVisitor {
TypeFeedbackOracle* oracle() { return &oracle_; }
Zone* zone() const { return info_->zone(); }
+ void MergeLowerType(Expression* e, Handle<Type> t) {
+ e->set_lower_type(handle(Type::Union(e->lower_type(), t), isolate_));
+ }
+ void MergeUpperType(Expression* e, Handle<Type> t) {
+ e->set_upper_type(handle(Type::Intersect(e->upper_type(), t), isolate_));
+ }
+
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitStatements(ZoneList<Statement*>* statements);
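MergeLowerType only ever widens an expression's lower bound (Union with new
feedback), and MergeUpperType only ever narrows the upper bound (Intersect with a
proven fact), so each bound moves monotonically in one direction as the typer
runs. A toy bitset model of the two merge directions (not the real Type objects):

    #include <cassert>

    int main() {
      const int kSmi = 1 << 3, kDouble = 1 << 6, kAny = ~0;

      int lower = 0;              // Type::None(): nothing observed yet.
      lower |= kSmi;              // MergeLowerType(e, Smi)
      lower |= kDouble;           // MergeLowerType(e, Double)
      assert(lower == (kSmi | kDouble));  // the bound only grows

      int upper = kAny;           // Type::Any(): nothing ruled out yet.
      upper &= (kSmi | kDouble);  // MergeUpperType(e, a Number-like bound)
      upper &= kSmi;              // a tighter proof narrows it further
      assert(upper == kSmi);      // the bound only shrinks
      return 0;
    }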
diff --git a/deps/v8/src/unbound-queue-inl.h b/deps/v8/src/unbound-queue-inl.h
index fffb1dbcfb..796ba401d5 100644
--- a/deps/v8/src/unbound-queue-inl.h
+++ b/deps/v8/src/unbound-queue-inl.h
@@ -30,6 +30,8 @@
#include "unbound-queue.h"
+#include "atomicops.h"
+
namespace v8 {
namespace internal {
@@ -66,11 +68,12 @@ void UnboundQueue<Record>::DeleteFirst() {
template<typename Record>
-void UnboundQueue<Record>::Dequeue(Record* rec) {
- ASSERT(divider_ != last_);
+bool UnboundQueue<Record>::Dequeue(Record* rec) {
+ if (divider_ == Acquire_Load(&last_)) return false;
Node* next = reinterpret_cast<Node*>(divider_)->next;
*rec = next->value;
- OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
+ Release_Store(&divider_, reinterpret_cast<AtomicWord>(next));
+ return true;
}
@@ -78,14 +81,23 @@ template<typename Record>
void UnboundQueue<Record>::Enqueue(const Record& rec) {
Node*& next = reinterpret_cast<Node*>(last_)->next;
next = new Node(rec);
- OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
- while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
+ Release_Store(&last_, reinterpret_cast<AtomicWord>(next));
+
+ while (first_ != reinterpret_cast<Node*>(Acquire_Load(&divider_))) {
+ DeleteFirst();
+ }
+}
+
+
+template<typename Record>
+bool UnboundQueue<Record>::IsEmpty() const {
+ return NoBarrier_Load(&divider_) == NoBarrier_Load(&last_);
}
template<typename Record>
-Record* UnboundQueue<Record>::Peek() {
- ASSERT(divider_ != last_);
+Record* UnboundQueue<Record>::Peek() const {
+ if (divider_ == Acquire_Load(&last_)) return NULL;
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}
diff --git a/deps/v8/src/unbound-queue.h b/deps/v8/src/unbound-queue.h
index 59a426b7fe..429e3c673e 100644
--- a/deps/v8/src/unbound-queue.h
+++ b/deps/v8/src/unbound-queue.h
@@ -46,10 +46,10 @@ class UnboundQueue BASE_EMBEDDED {
inline UnboundQueue();
inline ~UnboundQueue();
- INLINE(void Dequeue(Record* rec));
+ INLINE(bool Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
- INLINE(bool IsEmpty()) { return divider_ == last_; }
- INLINE(Record* Peek());
+ INLINE(bool IsEmpty() const);
+ INLINE(Record* Peek() const);
private:
INLINE(void DeleteFirst());
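The queue is single-producer/single-consumer: Enqueue publishes the new last_
with a release store and Dequeue/Peek read it with an acquire load, so a consumer
that observes the updated last_ is guaranteed to also observe the node it points
at; the former IsEmpty() assertion becomes an ordinary empty result. A minimal
modern-C++ analogue of that publication pattern (std::atomic standing in for V8's
atomicops, sketch only):

    #include <atomic>

    struct Node { int value; Node* next; };

    std::atomic<Node*> last{nullptr};  // analogue of last_

    void Publish(Node* n) {                      // producer side
      last.store(n, std::memory_order_release);  // like Release_Store(&last_)
    }

    Node* TryRead(Node* divider) {               // consumer side
      Node* n = last.load(std::memory_order_acquire);  // like Acquire_Load
      return n == divider ? nullptr : n;         // empty <=> divider_ == last_
    }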
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index c810cbac79..dfe1e20c32 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -63,12 +63,16 @@ namespace internal {
V8.MemoryExternalFragmentationMapSpace) \
HP(external_fragmentation_cell_space, \
V8.MemoryExternalFragmentationCellSpace) \
+ HP(external_fragmentation_property_cell_space, \
+ V8.MemoryExternalFragmentationPropertyCellSpace) \
HP(external_fragmentation_lo_space, \
V8.MemoryExternalFragmentationLoSpace) \
HP(heap_fraction_map_space, \
V8.MemoryHeapFractionMapSpace) \
HP(heap_fraction_cell_space, \
V8.MemoryHeapFractionCellSpace) \
+ HP(heap_fraction_property_cell_space, \
+ V8.MemoryHeapFractionPropertyCellSpace) \
#define HISTOGRAM_MEMORY_LIST(HM) \
@@ -77,7 +81,9 @@ namespace internal {
HM(heap_sample_map_space_committed, \
V8.MemoryHeapSampleMapSpaceCommitted) \
HM(heap_sample_cell_space_committed, \
- V8.MemoryHeapSampleCellSpaceCommitted)
+ V8.MemoryHeapSampleCellSpaceCommitted) \
+ HM(heap_sample_property_cell_space_committed, \
+ V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
@@ -88,8 +94,6 @@ namespace internal {
#define STATS_COUNTER_LIST_1(SC) \
/* Global Handle Count*/ \
SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
/* OS Memory allocated */ \
SC(memory_allocated, V8.OsMemoryAllocated) \
SC(normalized_maps, V8.NormalizedMaps) \
@@ -108,8 +112,6 @@ namespace internal {
SC(arguments_adaptors, V8.ArgumentsAdaptors) \
SC(compilation_cache_hits, V8.CompilationCacheHits) \
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
SC(string_ctor_calls, V8.StringConstructorCalls) \
SC(string_ctor_conversions, V8.StringConstructorConversions) \
SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
@@ -127,8 +129,6 @@ namespace internal {
SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
/* Amount of compiled source code. */ \
SC(total_compile_size, V8.TotalCompileSize) \
- /* Amount of source code compiled with the old codegen. */ \
- SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
/* Amount of source code compiled with the full codegen. */ \
SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \
/* Number of contexts created from scratch. */ \
@@ -155,8 +155,6 @@ namespace internal {
V8.GCCompactorCausedByPromotedData) \
SC(gc_compactor_caused_by_oldspace_exhaustion, \
V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_compactor_caused_by_weak_handles, \
- V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
/* How is the generic keyed-load stub used? */ \
@@ -171,39 +169,9 @@ namespace internal {
SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType) \
SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- /* Count how much the monomorphic keyed-load stubs are hit. */ \
- SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
- SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
- SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
- SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
- SC(keyed_load_field, V8.KeyedLoadField) \
- SC(keyed_load_callback, V8.KeyedLoadCallback) \
- SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
- SC(keyed_load_inline, V8.KeyedLoadInline) \
- SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
- SC(named_load_inline, V8.NamedLoadInline) \
- SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
- SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
- SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
- SC(dont_delete_hint_hit, V8.DontDeleteHintHit) \
- SC(dont_delete_hint_miss, V8.DontDeleteHintMiss) \
SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss) \
- SC(keyed_store_field, V8.KeyedStoreField) \
- SC(named_store_inline_field, V8.NamedStoreInlineField) \
- SC(keyed_store_inline, V8.KeyedStoreInline) \
- SC(named_load_inline_generic, V8.NamedLoadInlineGeneric) \
- SC(named_load_inline_field, V8.NamedLoadInlineFast) \
- SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
- SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
- SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
- SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
- SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
- SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
- SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
@@ -225,7 +193,6 @@ namespace internal {
SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \
SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
@@ -237,9 +204,7 @@ namespace internal {
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
@@ -273,10 +238,9 @@ namespace internal {
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(smi_checks_removed, V8.SmiChecksRemoved) \
- SC(map_checks_removed, V8.MapChecksRemoved) \
- SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount) \
+ SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
+ SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
+ SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
@@ -297,6 +261,12 @@ namespace internal {
SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
+ SC(property_cell_space_bytes_available, \
+ V8.MemoryPropertyCellSpaceBytesAvailable) \
+ SC(property_cell_space_bytes_committed, \
+ V8.MemoryPropertyCellSpaceBytesCommitted) \
+ SC(property_cell_space_bytes_used, \
+ V8.MemoryPropertyCellSpaceBytesUsed) \
SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 80b12deea6..cb67105c3f 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -271,6 +271,44 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
if (FLAG_trace_hydrogen) FLAG_parallel_recompilation = false;
+
+ if (FLAG_sweeper_threads <= 0) {
+ if (FLAG_concurrent_sweeping) {
+ FLAG_sweeper_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::CONCURRENT_SWEEPING);
+ } else if (FLAG_parallel_sweeping) {
+ FLAG_sweeper_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_SWEEPING);
+ }
+ if (FLAG_sweeper_threads == 0) {
+ FLAG_concurrent_sweeping = false;
+ FLAG_parallel_sweeping = false;
+ }
+ } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
+ FLAG_sweeper_threads = 0;
+ }
+
+ if (FLAG_parallel_marking) {
+ if (FLAG_marking_threads <= 0) {
+ FLAG_marking_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_MARKING);
+ }
+ if (FLAG_marking_threads == 0) {
+ FLAG_parallel_marking = false;
+ }
+ } else {
+ FLAG_marking_threads = 0;
+ }
+
+ if (FLAG_parallel_recompilation &&
+ SystemThreadManager::NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
+ FLAG_parallel_recompilation = false;
+ }
+
OS::SetUp();
Sampler::SetUp();
CPU::SetUp();
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index b8a5ae4380..47893e8215 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -48,9 +48,6 @@
#error both DEBUG and NDEBUG are set
#endif
-// TODO(dcarney): remove this
-#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
-
// Basic includes
#include "../include/v8.h"
#include "v8globals.h"
@@ -67,9 +64,7 @@
#include "incremental-marking-inl.h"
#include "mark-compact-inl.h"
#include "log-inl.h"
-#include "cpu-profiler-inl.h"
#include "handles-inl.h"
-#include "heap-snapshot-generator-inl.h"
#include "zone-inl.h"
namespace v8 {
@@ -104,6 +99,8 @@ class V8 : public AllStatic {
// Support for return-address rewriting profilers.
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver);
+ // Support for entry hooking JITed code.
+ static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
// Random number generation support. Not cryptographically safe.
static uint32_t Random(Context* context);
// We use random numbers internally in memory allocation and in the
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 98940c58e3..4932da93f9 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -180,12 +180,13 @@ enum AllocationSpace {
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
CELL_SPACE, // Only and all cell objects.
+ PROPERTY_CELL_SPACE, // Only and all global property cell objects.
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE,
FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
- LAST_PAGED_SPACE = CELL_SPACE
+ LAST_PAGED_SPACE = PROPERTY_CELL_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index e168b71abc..76eeac6a58 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -1786,30 +1786,36 @@ function FunctionBind(this_arg) { // Length is 1.
}
-function NewFunction(arg1) { // length == 1
- var n = %_ArgumentsLength();
+function NewFunctionString(arguments, function_token) {
+ var n = arguments.length;
var p = '';
if (n > 1) {
- p = new InternalArray(n - 1);
- for (var i = 0; i < n - 1; i++) p[i] = %_Arguments(i);
- p = Join(p, n - 1, ',', NonStringToString);
+ p = ToString(arguments[0]);
+ for (var i = 1; i < n - 1; i++) {
+ p += ',' + ToString(arguments[i]);
+ }
   // If the formal parameters string includes ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
if (%_CallFunction(p, ')', StringIndexOf) != -1) {
- throw MakeSyntaxError('paren_in_arg_string',[]);
+ throw MakeSyntaxError('paren_in_arg_string', []);
}
// If the formal parameters include an unbalanced block comment, the
// function must be rejected. Since JavaScript does not allow nested
// comments we can include a trailing block comment to catch this.
p += '\n/' + '**/';
}
- var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
- var source = '(function(' + p + ') {\n' + body + '\n})';
+ var body = (n > 0) ? ToString(arguments[n - 1]) : '';
+ return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
+}
+
+function FunctionConstructor(arg1) { // length == 1
+ var source = NewFunctionString(arguments, 'function');
var global_receiver = %GlobalReceiver(global);
+ // Compile the string in the constructor and not a helper so that errors
+ // appear to come from here.
var f = %_CallFunction(global_receiver, %CompileString(source, true));
-
%FunctionMarkNameShouldPrintAsAnonymous(f);
return f;
}
@@ -1820,7 +1826,7 @@ function NewFunction(arg1) { // length == 1
function SetUpFunction() {
%CheckIsBootstrapping();
- %SetCode($Function, NewFunction);
+ %SetCode($Function, FunctionConstructor);
%SetProperty($Function.prototype, "constructor", $Function, DONT_ENUM);
InstallFunctions($Function.prototype, DONT_ENUM, $Array(
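The ')' scan in NewFunctionString exists because the parameter list is spliced
textually into a function expression, so a crafted parameter string could
otherwise close the list early and smuggle in its own body. A sketch of the kind
of input the check rejects (ordinary JavaScript, assuming the standard Function
constructor):

    // Without the early check this would textually become
    //   (function(a) { return 1; }, function() { return a; })
    // i.e. a different function than the caller asked for.
    try {
      new Function('a) { return 1; }, function(', 'return a;');
    } catch (e) {
      // SyntaxError raised by the paren_in_arg_string check above.
    }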
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index 8661f9b88c..ff9f8f2366 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -257,9 +257,9 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
a = b; // Fake assignment to check assignability.
USE(a);
#endif // DEBUG
-#if defined(V8_HOST_ARCH_IA32)
+#if V8_HOST_ARCH_IA32
#define STOS "stosl"
-#elif defined(V8_HOST_ARCH_X64)
+#elif V8_HOST_ARCH_X64
#define STOS "stosq"
#endif
#if defined(__native_client__)
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index bad15cf2e0..0041c67c5a 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 19
-#define BUILD_NUMBER 13
+#define MINOR_VERSION 20
+#define BUILD_NUMBER 2
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 91bc528f31..1c231a70b6 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -308,6 +308,7 @@ Address* RelocInfo::target_reference_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
@@ -332,24 +333,22 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
@@ -445,8 +444,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
@@ -475,8 +474,8 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index f547e7947f..3a3ee9cdb0 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "macro-assembler.h"
#include "serialize.h"
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 2a01b0b24c..2b44a778c7 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "deoptimizer.h"
@@ -44,15 +44,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
+ // -- rax : number of arguments excluding receiver
+ // -- rdi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
// -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
- // -- rsp[8 * (argc +1)] : receiver
+ // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * (argc + 1)] : receiver
// -----------------------------------
// Insert extra arguments.
@@ -456,6 +456,8 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Expects five C++ function parameters.
// - Address entry (ignored)
// - JSFunction* function (
@@ -473,10 +475,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
#ifdef _WIN64
// MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
// [rsp+0x20] : argv
// Clear the context before we push it when entering the internal frame.
@@ -525,9 +527,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
#endif // _WIN64
// Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
+ // [rsp + 2 * kPointerSize ... ] : Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
@@ -756,12 +758,12 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
+ // rsp[0] : Return address
+ // rsp[8] : Argument n
+ // rsp[16] : Argument n-1
// ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
+ // rsp[8 * n] : Argument 1
+ // rsp[8 * (n + 1)] : Receiver (function to call)
//
// rax contains the number of arguments, n, not counting the receiver.
//
@@ -929,18 +931,18 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
+ // rsp : return address
+ // rsp[8] : arguments
+ // rsp[16] : receiver ("this")
+ // rsp[24] : function
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
+ // rbp : Old base pointer
+ // rbp[8] : return address
+ // rbp[16] : function arguments
+ // rbp[24] : receiver
+ // rbp[32] : function
static const int kArgumentsOffset = 2 * kPointerSize;
static const int kReceiverOffset = 3 * kPointerSize;
static const int kFunctionOffset = 4 * kPointerSize;
@@ -1093,369 +1095,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ movq(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- scratch3);
- __ bind(&entry);
- __ decq(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ testq(array_size, array_size);
- __ Assert(not_zero, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, factory->empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- call_generic_code);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
-
- __ SmiTest(rdx);
- __ j(not_zero, &not_empty_array);
- __ pop(r8); // Adjust stack.
- __ Drop(1);
- __ push(r8);
- __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
-  // is too large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0]: return address
- // esp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
-  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0] : return address
- // esp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // esp[0]: return address
- // esp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
-  // Location of the first array element (Parameter fill_with_hole to
-  // AllocateJSArray is false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // esp[0]: return address
- // esp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r8, &has_non_smi_element);
- }
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // esp[0]: return address
- // esp[8]: last argument
- __ bind(&finish);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(r8,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(rbx);
- __ jmp(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // rbx: JSArray
- __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r11,
- kScratchRegister,
- &cant_transition_map);
-
- __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
- __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
+ // -- rax : argc
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -1477,26 +1119,15 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- if (FLAG_optimize_constructed_arrays) {
- // tail call a stub
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
- }
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
+ // -- rax : argc
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -1517,61 +1148,16 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
- if (FLAG_optimize_constructed_arrays) {
- // tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Move(rbx, undefined_sentinel);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
- }
+ // tail call a stub
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ Move(rbx, undefined_sentinel);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
-
- // Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index bc2e59a41b..923384853f 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "bootstrapper.h"
#include "code-stubs.h"
@@ -254,7 +254,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
+ // The ToNumber stub takes one argument in rax.
Label check_heap_number, call_builtin;
__ SmiTest(rax);
__ j(not_zero, &check_heap_number, Label::kNear);
@@ -333,7 +333,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
- // rcx holds native context, ebx points to fixed array of 3-element entries
+ // rcx holds native context, rbx points to fixed array of 3-element entries
// (native context, optimized code, literals).
// The optimized code map must never be empty, so check the first elements.
Label install_optimized;
@@ -452,8 +452,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
- // [rsp + (1 * kPointerSize)]: function
- // [rsp + (2 * kPointerSize)]: serialized scope info
+ // [rsp + (1 * kPointerSize)] : function
+ // [rsp + (2 * kPointerSize)] : serialized scope info
// Try to allocate the context in new space.
Label gc;
@@ -1232,7 +1232,7 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label right_arg_changed, call_runtime;
- if (op_ == Token::MOD && has_fixed_right_arg_) {
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
// It is guaranteed that the value will fit into a Smi, because if it
  // didn't, we wouldn't be here; see BinaryOp_Patch.
__ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
@@ -1424,7 +1424,7 @@ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
// If the argument in rdx is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep eax and edx intact
+ // Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, alloc_failure);
// Now rdx can be overwritten losing one of the arguments as we are
@@ -1463,16 +1463,16 @@ void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
+ // rsp[8] : argument (should be number).
+ // rsp[0] : return address.
// Output:
// rax: tagged double result.
// UNTAGGED case:
  // Input:
- // rsp[0]: return address.
- // xmm1: untagged double input argument
+ // rsp[0] : return address.
+ // xmm1 : untagged double input argument
// Output:
- // xmm1: untagged double result.
+ // xmm1 : untagged double result.
Label runtime_call;
Label runtime_call_clear_stack;
@@ -1558,7 +1558,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
// Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
+ static_cast<int>(elem2_start - elem_start));
CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
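
Note: the rewritten CHECK_EQ spells out why the element stride is 16 bytes on x64: each cache entry is the two 32-bit halves of the input double followed by a result pointer. A minimal C++ sketch of the layout being pinned down (the struct name is illustrative, not V8's):

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-in for one transcendental-cache element.
    struct Element {
      uint32_t in[2];   // low and high halves of the input double
      void*    output;  // tagged HeapNumber holding the cached result
    };

    // 2 * kIntSize + 1 * kPointerSize, with no padding on x64.
    static_assert(sizeof(Element) == 2 * sizeof(uint32_t) + sizeof(void*),
                  "two uint32s and a pointer per element");
    static_assert(offsetof(Element, output) == 2 * sizeof(uint32_t),
                  "output follows the two input words");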
@@ -2212,7 +2213,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
// The stub is called from non-optimized code, which expects the result
- // as heap number in eax.
+ // as heap number in rax.
__ bind(&done);
__ AllocateHeapNumber(rax, rcx, &call_runtime);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
@@ -2423,8 +2424,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Stack layout:
- // rsp[0] : return address
- // rsp[8] : number of parameters (tagged)
+ // rsp[0] : return address
+ // rsp[8] : number of parameters (tagged)
// rsp[16] : receiver displacement
// rsp[24] : function
// Registers used over the whole function:
@@ -2639,10 +2640,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[8] : number of parameters
- // esp[16] : receiver displacement
- // esp[24] : function
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -2665,8 +2666,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
@@ -2773,11 +2774,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: last_match_info (expected JSArray)
- // rsp[16]: previous index
- // rsp[24]: subject string
- // rsp[32]: JSRegExp object
+ // rsp[0] : return address
+ // rsp[8] : last_match_info (expected JSArray)
+ // rsp[16] : previous index
+ // rsp[24] : subject string
+ // rsp[32] : JSRegExp object
static const int kLastMatchInfoOffset = 1 * kPointerSize;
static const int kPreviousIndexOffset = 2 * kPointerSize;
@@ -3436,11 +3437,10 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
__ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
STATIC_ASSERT(kInternalizedTag != 0);
- __ testb(scratch, Immediate(kIsInternalizedMask));
- __ j(zero, label);
+ __ and_(scratch, Immediate(kIsNotStringMask | kIsInternalizedMask));
+ __ cmpb(scratch, Immediate(kInternalizedTag | kStringTag));
+ __ j(not_equal, label);
}
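
Note: the new sequence replaces the lone internalized-bit test with a combined string-and-internalized check, so one AND plus one compare proves both that the value is a string and that it is internalized. A hedged C++ sketch of the predicate, with bit values assumed for illustration rather than taken from V8's headers:

    #include <cstdint>

    // Assumed bit layout, for illustration only.
    const uint8_t kIsNotStringMask    = 0x80;
    const uint8_t kStringTag          = 0x00;
    const uint8_t kIsInternalizedMask = 0x10;
    const uint8_t kInternalizedTag    = 0x10;

    bool IsInternalizedString(uint8_t instance_type) {
      // Clear everything but the string bit and the internalized bit,
      // then require "string" and "internalized" simultaneously.
      return (instance_type & (kIsNotStringMask | kIsInternalizedMask)) ==
             (kInternalizedTag | kStringTag);
    }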
@@ -3717,56 +3717,17 @@ void InterruptStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // rbx : cache cell for call target
- // rdi : the function to call
- Isolate* isolate = masm->isolate();
- Label initialize, done;
-
- // Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmpq(rcx, rdi);
- __ j(equal, &done, Label::kNear);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ j(equal, &done, Label::kNear);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rbx : cache cell for call target
// rdi : the function to call
- ASSERT(FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -3778,12 +3739,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Special handling of the Array() function, which caches not only the
// monomorphic Array function but the initial ElementsKind with special
  // sentinels.
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- LAST_FAST_ELEMENTS_KIND);
__ JumpIfNotSmi(rcx, &miss);
- __ Cmp(rcx, terminal_kind_sentinel);
- __ j(above, &miss);
+ if (FLAG_debug_code) {
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ Cmp(rcx, terminal_kind_sentinel);
+ __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
+ }
+
// Make sure the function is the Array() function
__ LoadArrayFunction(rcx);
__ cmpq(rdi, rcx);
@@ -3799,7 +3763,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
TypeFeedbackCells::MegamorphicSentinel(isolate));
__ jmp(&done, Label::kNear);
@@ -3817,12 +3781,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> initial_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
GetInitialFastElementsKind());
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
initial_kind_sentinel);
__ jmp(&done);
__ bind(&not_array_function);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
+ __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
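
Note: with GenerateRecordCallTargetNoArray gone, this is the single state machine for call-target feedback: an uninitialized cell is patched with the callee, and a monomorphic miss goes megamorphic. A minimal sketch of those transitions, with plain integers standing in for V8's sentinel objects; the Array() ElementsKind recording is omitted here:

    #include <cstdint>

    // Stand-ins for the uninitialized/megamorphic sentinels; not V8's objects.
    const intptr_t kUninitialized = 0;
    const intptr_t kMegamorphic   = 1;

    void RecordCallTarget(intptr_t* cell, intptr_t callee) {
      intptr_t state = *cell;
      if (state == callee || state == kMegamorphic) return;  // hit or generic
      if (state == kUninitialized) {
        *cell = callee;        // first call: go monomorphic
        return;
      }
      *cell = kMegamorphic;    // monomorphic miss: give up on the cache
    }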
@@ -3860,11 +3824,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Fast-case: Just invoke the function.
@@ -3893,7 +3853,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
TypeFeedbackCells::MegamorphicSentinel(isolate));
}
// Check for function proxy.
@@ -3939,15 +3899,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
+ GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
+ Register jmp_reg = rcx;
__ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movq(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3995,9 +3951,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
- if (FLAG_optimize_constructed_arrays) {
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
- }
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -4179,6 +4133,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// this by performing a garbage collection and retrying the
// builtin once.
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Enter the exit frame that transitions from JavaScript to C++.
#ifdef _WIN64
int arg_stack_space = (result_size_ < 2 ? 2 : 4);
@@ -4259,6 +4215,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
@@ -4425,14 +4383,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void InstanceofStub::Generate(MacroAssembler* masm) {
// Implements "value instanceof function" operator.
// Expected input state with no inline cache:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
+ // rsp[0] : return address
+ // rsp[8] : function pointer
+ // rsp[16] : value
// Expected input state with an inline one-element cache:
- // rsp[0] : return address
- // rsp[1] : offset from return address to location of inline cache
- // rsp[2] : function pointer
- // rsp[3] : value
+ // rsp[0] : return address
+ // rsp[8] : offset from return address to location of inline cache
+ // rsp[16] : function pointer
+ // rsp[24] : value
// Returns a bitwise zero to indicate that the value
  // is an instance of the function and anything else to
// indicate that the value is not an instance.
@@ -5154,17 +5112,17 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Don't enter the rep movs if there are less than 4 bytes to copy.
Label last_bytes;
- __ testl(count, Immediate(~7));
+ __ testl(count, Immediate(~(kPointerSize - 1)));
__ j(zero, &last_bytes, Label::kNear);
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of doublewords to copy.
+ __ shr(count, Immediate(kPointerSizeLog2)); // Number of quadwords to copy.
__ repmovsq();
// Find number of bytes left.
__ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
+ __ and_(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
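
Note: replacing the literal 7 and 3 with kPointerSize-derived expressions makes the copy arithmetic self-documenting: bulk-move whole 8-byte words with rep movsq, then finish the sub-word tail byte by byte. A C++ sketch of the same split, with memcpy standing in for rep movsq:

    #include <cstddef>
    #include <cstring>

    const size_t kPointerSize     = 8;  // x64
    const size_t kPointerSizeLog2 = 3;

    void CopyCharacters(char* dest, const char* src, size_t count) {
      if (count & ~(kPointerSize - 1)) {       // at least one whole word
        size_t words = count >> kPointerSizeLog2;
        std::memcpy(dest, src, words * kPointerSize);  // rep movsq stand-in
        dest  += words * kPointerSize;
        src   += words * kPointerSize;
        count &= kPointerSize - 1;             // bytes left over
      }
      while (count-- > 0) *dest++ = *src++;    // the "last_bytes" loop
    }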
@@ -5374,10 +5332,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
+ // rsp[0] : return address
+ // rsp[8] : to
+ // rsp[16] : from
+ // rsp[24] : string
const int kToOffset = 1 * kPointerSize;
const int kFromOffset = kToOffset + kPointerSize;
@@ -5736,9 +5694,9 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
+ // rsp[0] : return address
+ // rsp[8] : right string
+ // rsp[16] : left string
__ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
__ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
@@ -5894,9 +5852,13 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &miss, Label::kNear);
+ __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
+ __ cmpb(tmp1, Immediate(kInternalizedTag | kStringTag));
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
+ __ cmpb(tmp2, Immediate(kInternalizedTag | kStringTag));
+ __ j(not_equal, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
@@ -5939,19 +5901,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- Label succeed1;
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed1, Label::kNear);
- __ cmpb(tmp1, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &miss, Label::kNear);
- __ bind(&succeed1);
-
- Label succeed2;
- __ testb(tmp2, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed2, Label::kNear);
- __ cmpb(tmp2, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &miss, Label::kNear);
- __ bind(&succeed2);
+ __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
@@ -6013,7 +5964,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&not_same);
// Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
+ // because we already know they are not identical. We also know they are both
+ // strings.
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag != 0);
@@ -6169,13 +6121,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Check if the entry name is not a unique name.
__ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(not_zero, &good, Label::kNear);
- __ cmpb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, miss);
-
+ __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ miss);
__ bind(&good);
}
@@ -6246,9 +6193,9 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
+ // rsp[0 * kPointerSize] : return address.
+ // rsp[1 * kPointerSize] : key's hash.
+ // rsp[2 * kPointerSize] : key.
// Registers:
// dictionary_: NameDictionary to probe.
// result_: used as scratch.
@@ -6301,15 +6248,9 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// key we are looking for.
// Check if the entry name is not a unique name.
- Label cont;
__ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(not_zero, &cont);
- __ cmpb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &maybe_in_dictionary);
- __ bind(&cont);
+ __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
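
Note: both probe loops now delegate the "is this a unique name?" test to MacroAssembler::JumpIfNotUniqueName instead of open-coding the internalized/SYMBOL_TYPE checks. For orientation, a hedged sketch of the negative-lookup probing that test sits inside; linear probing is used here for brevity, where the real code uses an increasing probe step:

    #include <cstddef>
    #include <cstdint>

    const intptr_t kUndefined = -1;  // stand-in for the undefined root

    // Returns false only when the key is provably absent; anything
    // inconclusive falls back to the runtime ("maybe_in_dictionary").
    bool MayBeInDictionary(const intptr_t* keys, size_t capacity,
                           uint32_t hash, intptr_t key) {
      for (size_t i = 0; i < capacity; i++) {
        size_t entry = (hash + i) & (capacity - 1);  // power-of-two capacity
        intptr_t probe = keys[entry];
        if (probe == kUndefined) return false;  // empty slot: absent
        if (probe == key) return true;          // found it
      }
      return true;  // table full of collisions: let the runtime decide
    }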
@@ -6635,12 +6576,12 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : element value to store
- // -- rbx : array literal
- // -- rdi : map of array literal
- // -- rcx : element index as smi
- // -- rdx : array literal index in function
- // -- rsp[0] : return address
+ // -- rax : element value to store
+ // -- rcx : element index as smi
+ // -- rsp[0] : return address
+ // -- rsp[8] : array literal index in function
+ // -- rsp[16] : array literal
+ // clobbers rbx, rdx, rdi
// -----------------------------------
Label element_done;
@@ -6649,6 +6590,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+
__ CheckFastElements(rdi, &double_elements);
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
@@ -6725,7 +6671,11 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ // It's always safe to call the entry hook stub, as the hook itself
+ // is not allowed to call back to V8.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -6733,45 +6683,25 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Save volatile registers.
- // Live registers at this point are the same as at the start of any
- // JS function:
- // o rdi: the JS function object being called (i.e. ourselves)
- // o rsi: our context
- // o rbp: our caller's frame pointer
- // o rsp: stack pointer (pointing to return address)
- // o rcx: rcx is zero for method calls and non-zero for function calls.
-#ifdef _WIN64
- const int kNumSavedRegisters = 1;
-
- __ push(rcx);
-#else
- const int kNumSavedRegisters = 3;
-
- __ push(rcx);
- __ push(rdi);
- __ push(rsi);
-#endif
+ // This stub can be called from essentially anywhere, so it needs to save
+ // all volatile and callee-save registers.
+ const size_t kNumSavedRegisters = 2;
+ __ push(arg_reg_1);
+ __ push(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
-#ifdef _WIN64
- __ lea(rdx, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
-#else
- __ lea(rsi, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
-#endif
+ __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
// Calculate the function address to the first arg.
-#ifdef _WIN64
- __ movq(rcx, Operand(rsp, kNumSavedRegisters * kPointerSize));
- __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
-#else
- __ movq(rdi, Operand(rsp, kNumSavedRegisters * kPointerSize));
- __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
-#endif
+ __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
+ __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+
+ // Save the remainder of the volatile registers.
+ masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
// Call the entry hook function.
- __ movq(rax, &entry_hook_, RelocInfo::NONE64);
- __ movq(rax, Operand(rax, 0));
+ __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ RelocInfo::NONE64);
AllowExternalCallThatCantCauseGC scope(masm);
@@ -6780,13 +6710,9 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ CallCFunction(rax, kArgumentCount);
// Restore volatile regs.
-#ifdef _WIN64
- __ pop(rcx);
-#else
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rcx);
-#endif
+ masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
+ __ pop(arg_reg_2);
+ __ pop(arg_reg_1);
__ Ret();
}
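
Note: the stub now consults isolate()->function_entry_hook() rather than a static, and passes the hook two arguments: the callee's code address (arg_reg_1) and the location of the return address on the stack (arg_reg_2). A hedged sketch of what such a hook can look like on the embedder side; the signature is assumed to match v8::FunctionEntryHook of this era:

    #include <cstdint>
    #include <cstdio>

    // Assumed signature: (function entry address, address of the
    // return-address slot on the stack). The hook must not call back into V8.
    void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
      std::fprintf(stderr, "enter code at %#zx (return slot %#zx)\n",
                   static_cast<size_t>(function),
                   static_cast<size_t>(return_addr_location));
    }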
@@ -6816,8 +6742,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
// rdx - kind
// rax - number of arguments
// rdi - constructor?
- // esp[0] - return address
- // esp[4] - last argument
+ // rsp[0] - return address
+ // rsp[8] - last argument
ASSERT(FAST_SMI_ELEMENTS == 0);
ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ASSERT(FAST_ELEMENTS == 2);
@@ -6845,6 +6771,10 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &normal_sequence);
+ // The type cell may have gone megamorphic; don't overwrite it if so.
+ __ movq(rcx, FieldOperand(rbx, kPointerSize));
+ __ JumpIfNotSmi(rcx, &normal_sequence);
+
// Save the resulting elements kind in type info
__ Integer32ToSmi(rdx, rdx);
__ movq(FieldOperand(rbx, kPointerSize), rdx);
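
Note: the added JumpIfNotSmi guard matters because the one-argument dispatch may run after the feedback cell has already gone megamorphic; only a Smi (a recorded ElementsKind) may be overwritten. A small sketch of the guarded update, with a toy Smi tag assumed for illustration:

    #include <cstdint>

    inline bool IsSmi(intptr_t v) { return (v & 1) == 0; }       // toy tagging
    inline intptr_t ToSmi(int v)  { return static_cast<intptr_t>(v) << 1; }

    void UpdateElementsKindFeedback(intptr_t* cell, int holey_kind) {
      if (!IsSmi(*cell)) return;   // cell went megamorphic: leave it alone
      *cell = ToSmi(holey_kind);   // remember the transitioned kind
    }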
@@ -6877,7 +6807,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, true);
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -6911,11 +6841,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
- // -- rbx : type info cell
- // -- rdi : constructor
+ // -- rax : argc
+ // -- rbx : type info cell
+ // -- rdi : constructor
// -- rsp[0] : return address
- // -- rsp[4] : last argument
+ // -- rsp[8] : last argument
// -----------------------------------
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
@@ -6934,63 +6864,49 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, "Unexpected initial map for Array function");
- // We should either have undefined in ebx or a valid jsglobalpropertycell
+ // We should either have undefined in rbx or a valid cell
Label okay_here;
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), global_property_cell_map);
+ __ Cmp(FieldOperand(rbx, 0), cell_map);
__ Assert(equal, "Expected property cell in register rbx");
__ bind(&okay_here);
}
- if (FLAG_optimize_constructed_arrays) {
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
- __ JumpIfNotSmi(rdx, &no_info);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ movq(rdx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ testq(rax, rax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ Label no_info, switch_ready;
+ // Get the elements kind and case on that.
+ __ Cmp(rbx, undefined_sentinel);
+ __ j(equal, &no_info);
+ __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
+ __ JumpIfNotSmi(rdx, &no_info);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&switch_ready);
+ __ bind(&no_info);
+ __ movq(rdx, Immediate(GetInitialFastElementsKind()));
+ __ bind(&switch_ready);
+
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ UNREACHABLE();
}
}
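
Note: with the FLAG_optimize_constructed_arrays branch gone, the dispatch reduces to a straight argc split. The shape of the selection, sketched with abbreviated stub names:

    enum StubKind { kNoArgument, kOneArgument, kNArguments };

    // ANY at stub-generation time means: decide on argc at run time.
    StubKind SelectArrayStub(int argc) {
      if (argc == 0) return kNoArgument;   // ArrayNoArgumentConstructorStub
      if (argc == 1) return kOneArgument;  // CreateArrayDispatchOneArgument
      return kNArguments;                  // ArrayNArgumentsConstructorStub
    }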
@@ -7033,11 +6949,11 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : argc
- // -- ebx : type info cell
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
+ // -- rax : argc
+ // -- rbx : type info cell
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
// -----------------------------------
if (FLAG_debug_code) {
@@ -7054,46 +6970,33 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Check(equal, "Unexpected initial map for Array function");
}
- if (FLAG_optimize_constructed_arrays) {
- // Figure out the right elements kind
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ shr(rcx, Immediate(Map::kElementsKindShift));
+ // Figure out the right elements kind
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
- Label done;
- __ cmpl(rcx, Immediate(FAST_ELEMENTS));
- __ j(equal, &done);
- __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
- __ Assert(equal,
- "Invalid ElementsKind for InternalArray or InternalPackedArray");
- __ bind(&done);
- }
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ shr(rcx, Immediate(Map::kElementsKindShift));
- Label fast_elements_case;
+ if (FLAG_debug_code) {
+ Label done;
__ cmpl(rcx, Immediate(FAST_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ j(equal, &done);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ __ bind(&done);
+ }
- __ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ Label fast_elements_case;
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
}
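
Note: the now-unconditional path reads the ElementsKind straight out of the map's bit field 2 with a mask and shift. The decode, sketched with illustrative constants (the real values live in Map):

    #include <cstdint>

    const uint8_t kElementsKindMask  = 0xF8;  // illustrative
    const int     kElementsKindShift = 3;     // illustrative

    int ElementsKindFromBitField2(uint8_t bit_field2) {
      return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
    }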
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 2ac56a144c..9643872a8c 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "macro-assembler.h"
@@ -346,7 +346,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
@@ -381,7 +381,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Conversion loop.
__ bind(&loop);
__ movq(rbx,
- FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+ FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
@@ -459,7 +459,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
__ movq(r14, FieldOperand(r8,
r9,
- times_pointer_size,
+ times_8,
FixedDoubleArray::kHeaderSize));
// r9 : current element's index
// r14: current element
@@ -735,7 +735,11 @@ void Code::PatchPlatformCodeAge(byte* sequence,
Code* stub = GetCodeAgeStub(age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start());
- patcher.masm()->nop();
+ for (int i = 0;
+ i < kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength;
+ i++) {
+ patcher.masm()->nop();
+ }
}
}
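
Note: the loop replaces a single nop so the patched sequence always totals kNoCodeAgeSequenceLength bytes regardless of the call length. A sketch of the patch arithmetic; the two length constants are assumed, and the opcodes are the usual x64 encodings:

    #include <cstdint>
    #include <cstring>

    const int kNoCodeAgeSequenceLength    = 6;  // assumed prologue length
    const int kShortCallInstructionLength = 5;  // x64 "call rel32" is 5 bytes

    // Overwrite the prologue with "call stub" and pad with NOPs so the
    // patched region keeps its original length.
    void PatchCodeAgeSequence(uint8_t* sequence, int32_t rel32_to_stub) {
      sequence[0] = 0xE8;  // call rel32
      std::memcpy(sequence + 1, &rel32_to_stub, sizeof rel32_to_stub);
      for (int i = kShortCallInstructionLength;
           i < kNoCodeAgeSequenceLength; i++) {
        sequence[i] = 0x90;  // nop
      }
    }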
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 80e22c6297..96c5330832 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -33,7 +33,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "cpu.h"
#include "macro-assembler.h"
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 750d929267..a337b0d052 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "assembler.h"
#include "codegen.h"
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 21682c2708..f2f7ed0735 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "deoptimizer.h"
@@ -548,7 +548,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index fb0914d7d0..d787775047 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -31,7 +31,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "disasm.h"
#include "lazy-instance.h"
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index a811a34ba9..5cc27a6e12 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -27,22 +27,17 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "assembler.h"
#include "assembler-x64.h"
#include "assembler-x64-inl.h"
-#include "frames-inl.h"
+#include "frames.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index e9fe2a8cd2..9ad7f586b6 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "code-stubs.h"
#include "codegen.h"
@@ -118,7 +118,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -308,7 +308,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ SmiAddConstant(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ SmiAddConstant(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(-delta));
}
@@ -323,8 +323,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
RelocInfo::NONE64);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- kScratchRegister);
+ __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
@@ -338,7 +337,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
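
Note: kBackEdgeDistanceUnit is renamed to kCodeSizeMultiplier, but the formula is unchanged: the interrupt budget is charged proportionally to the code distance of the back edge, clamped to [1, kMaxBackEdgeWeight]. As a plain function, with both constants assumed since they are port-specific:

    #include <algorithm>

    const int kMaxBackEdgeWeight  = 127;  // assumed clamp
    const int kCodeSizeMultiplier = 162;  // assumed per-port scale factor

    int BackEdgeWeight(int distance) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kCodeSizeMultiplier));
    }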
@@ -379,7 +378,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -1128,14 +1127,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(rbx, cell);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
@@ -1666,10 +1663,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ Move(rcx, key->handle());
+ __ Move(rcx, key->value());
__ movq(rdx, Operand(rsp, 0));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1809,13 +1806,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax);
+ __ push(rax); // array literal
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1824,7 +1819,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
__ movq(FieldOperand(rbx, offset), result_register());
@@ -1835,10 +1830,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ Move(rcx, Smi::FromInt(i));
- __ Move(rdx, Smi::FromInt(expr->literal_index()));
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
@@ -1847,6 +1839,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
+ __ addq(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1974,22 +1967,39 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::INITIAL:
- case Yield::SUSPEND: {
- VisitForStackValue(expr->generator_object());
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(continuation.pos()));
+ __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movq(rcx, rsi);
+ __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+ kDontSaveFPRegs);
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpq(rsp, rbx);
+ __ j(equal, &post_runtime);
+ __ push(rax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ movq(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
- Label resume;
- __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &resume);
- if (expr->yield_kind() == Yield::SUSPEND) {
- EmitReturnIteratorResult(false);
- } else {
- __ pop(result_register());
- EmitReturnSequence();
- }
+ __ pop(result_register());
+ EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -2001,7 +2011,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Move(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
- EmitReturnIteratorResult(true);
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
break;
}
@@ -2012,75 +2025,68 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
- Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ jmp(&l_next);
- // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ movq(rcx, Operand(rsp, 1 * kPointerSize)); // iter
- __ push(rcx); // iter
- __ push(rax); // exception
- __ movq(rax, rcx); // iter
__ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
- Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(throw_ic); // iter.throw in rax
+ __ push(rcx);
+ __ push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ push(rax); // exception
__ jmp(&l_call);
- // try { received = yield result.value }
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
__ bind(&l_try);
- __ pop(rax); // result.value
+ __ pop(rax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(rax); // result.value
- __ push(Operand(rsp, (0 + 1) * kPointerSize + handler_size)); // g
+ __ push(rax); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ movq(rax, Operand(rsp, generator_object_depth));
+ __ push(rax); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(l_continuation.pos()));
+ __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movq(rcx, rsi);
+ __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+ kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ movq(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &l_resume);
- EmitReturnIteratorResult(false);
+ __ pop(rax); // result
+ EmitReturnSequence();
__ bind(&l_resume); // received in rax
__ PopTryHandler();
- // receiver = iter; f = iter.next; arg = received;
+ // receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize)); // iter
- __ push(rcx); // iter
- __ push(rax); // received
- __ movq(rax, rcx); // iter
__ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
- Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(next_ic); // iter.next in rax
+ __ push(rcx);
+ __ push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ push(rax); // received
- // result = f.call(receiver, arg);
+ // result = receiver[f](arg);
__ bind(&l_call);
- Label l_call_runtime;
- __ JumpIfSmi(rax, &l_call_runtime);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &l_call_runtime);
- __ movq(rdi, rax);
- ParameterCount count(1);
- __ InvokeFunction(rdi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&l_loop);
- __ bind(&l_call_runtime);
- __ push(rax);
- __ CallRuntime(Runtime::kCall, 3);
+ __ Drop(1); // The key is still on the stack; drop it.
- // val = result.value; if (!result.done) goto l_try;
+ // if (!result.done) goto l_try;
__ bind(&l_loop);
- // result.value
__ push(rax); // save result
- __ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in rax
- __ pop(rbx); // result
- __ push(rax); // result.value
- __ movq(rax, rbx); // result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in rax
@@ -2090,7 +2096,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ j(zero, &l_try);
// result.value
- __ pop(rax); // result.value
+ __ pop(rax); // result
+ __ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
break;
}
@@ -2195,13 +2204,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
-void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
__ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ Move(rbx, map);
@@ -2222,33 +2238,13 @@ void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
// root set.
__ RecordWriteField(rax, JSGeneratorObject::kResultValuePropertyOffset,
rcx, rdx, kDontSaveFPRegs);
-
- if (done) {
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- }
-
- EmitReturnSequence();
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ movq(context_register(),
- Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&allocated);
}
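
Note: the restructuring moves the gc_required block above &allocated so the inline fast path and the runtime slow path fall into the same initialization code, and the helper no longer emits a return of its own. The allocate-or-fall-back shape, sketched with hypothetical helpers (malloc stands in for both allocators):

    #include <cstddef>
    #include <cstdlib>

    // Hypothetical stand-ins for the inline bump allocation and the
    // Runtime::kAllocateInNewSpace call.
    static void* TryInlineAllocate(size_t size) { return std::malloc(size); }
    static void* RuntimeAllocate(size_t size)   { return std::malloc(size); }

    void* AllocateIteratorResult(size_t instance_size) {
      void* obj = TryInlineAllocate(instance_size);  // fast path
      if (obj == nullptr) {
        obj = RuntimeAllocate(instance_size);        // the gc_required path
      }
      // &allocated: both paths continue here to set map, value and done.
      return obj;
    }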
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
+ __ Move(rcx, key->value());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -2367,7 +2363,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ movq(rdx, rax);
__ pop(rax); // Restore value.
- __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ Move(rcx, prop->key()->AsLiteral()->value());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2490,7 +2486,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -2620,8 +2616,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ Move(rbx, cell);
@@ -2751,7 +2746,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2804,8 +2799,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ Move(rbx, cell);
@@ -3357,7 +3351,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3778,7 +3772,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
@@ -4516,7 +4510,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index efb41c85ec..a8de443940 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "ic-inl.h"
@@ -337,7 +337,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
__ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string); // The value in hash is used at jump target.
- // Is the string internalized?
+ // Is the string internalized? We already know it's a string, so a single
+ // bit test is enough.
STATIC_ASSERT(kInternalizedTag != 0);
__ testb(FieldOperand(map, Map::kInstanceTypeOffset),
Immediate(kIsInternalizedMask));
@@ -711,10 +712,10 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -869,14 +870,14 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
int argc,
Label* miss) {
// ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rdi : function
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
__ JumpIfSmi(rdi, miss);
// Check that the value is a JavaScript function.
@@ -893,13 +894,13 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// The generated code falls through if the call should be handled by runtime.
void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss;
@@ -923,13 +924,13 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
IC::UtilityId id,
Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Counters* counters = masm->isolate()->counters();
@@ -995,13 +996,13 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1013,13 +1014,13 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1124,13 +1125,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Check if the name is really a name.
@@ -1229,7 +1230,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, notin;
Operand mapped_location =
@@ -1252,10 +1253,10 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
@@ -1292,13 +1293,13 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label slow, notin;
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1384,7 +1385,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
@@ -1401,7 +1402,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
Counters* counters = masm->isolate()->counters();
@@ -1425,7 +1426,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
@@ -1502,8 +1503,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1526,10 +1527,10 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
@@ -1547,10 +1548,10 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
@@ -1567,10 +1568,10 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
@@ -1587,10 +1588,10 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
@@ -1610,9 +1611,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
// Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
@@ -1635,9 +1636,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
// Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index f423133cf1..de43f86a3d 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
@@ -67,7 +67,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
@@ -92,20 +92,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
- for (int i = 0 ; i < transition_maps_.length(); i++) {
- transition_maps_.at(i)->AddDependentCode(
- DependentCode::kTransitionGroup, code);
- }
- if (graph()->depends_on_empty_array_proto_elements()) {
- isolate()->initial_object_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- isolate()->initial_array_prototype()->map()->AddDependentCode(
- DependentCode::kElementsCantBeAddedGroup, code);
- }
+ info()->CommitDependencies(code);
}
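FinishCode no longer registers map dependencies inline; that bookkeeping moves behind info()->CommitDependencies(code). A sketch of the centralized pattern, assuming a single collected list (the real CompilationInfo tracks one group per DependentCode kind):

    // Sketch, not the compiler.cc implementation.
    void CompilationInfo::CommitDependencies(Handle<Code> code) {
      for (int i = 0; i < dependent_maps_.length(); i++) {  // assumed list
        dependent_maps_.at(i)->AddDependentCode(
            DependentCode::kPrototypeCheckGroup, code);
      }
    }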
@@ -482,27 +469,15 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
+ WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
@@ -534,60 +509,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
+
+ // TODO(mstarzinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed from the deoptimizer. Currently
+ // this is only used for the arguments object.
+ if (value == NULL) {
+ int arguments_count = environment->values()->length() - translation_size;
+ translation->BeginArgumentsObject(arguments_count);
+ for (int i = 0; i < arguments_count; ++i) {
+ LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
+ value,
+ environment->HasTaggedValueAt(translation_size + i),
+ environment->HasUint32ValueAt(translation_size + i));
}
+ continue;
}
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
+ environment->HasUint32ValueAt(i));
}
}
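The rewritten loop gives the arguments object a positional encoding: a NULL operand among the first translation_size values marks where it sits, and every value past translation_size is one of its elements. A toy walk over the same layout, with plain pointers standing in for LOperand* and comments standing in for AddToTranslation:

    #include <cstddef>
    #include <vector>
    struct EnvModel {
      std::vector<const void*> values;
      int translation_size;  // values beyond this index belong to arguments
    };
    static void WalkModel(const EnvModel& e) {
      for (int i = 0; i < e.translation_size; ++i) {
        if (e.values[i] == NULL) {  // marker: materialize arguments object
          int count = static_cast<int>(e.values.size()) - e.translation_size;
          for (int j = 0; j < count; ++j) {
            // AddToTranslation(e.values[e.translation_size + j]) ...
          }
          continue;
        }
        // AddToTranslation(e.values[i]) as an ordinary environment slot ...
      }
    }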
@@ -595,17 +539,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ bool is_uint32) {
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -710,8 +645,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -719,7 +652,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -746,7 +679,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
- if (FLAG_trap_on_deopt) {
+ if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label done;
if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
@@ -1040,7 +973,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ // Record the address of the first unknown OSR value as the place to enter.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
@@ -1074,12 +1008,12 @@ void LCodeGen::DoModI(LModI* instr) {
__ andl(left_reg, Immediate(divisor - 1));
__ bind(&done);
- } else if (hmod->has_fixed_right_arg()) {
+ } else if (hmod->fixed_right_arg().has_value) {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(ToRegister(instr->result())));
Register right_reg = ToRegister(instr->right());
- int32_t divisor = hmod->fixed_right_arg_value();
+ int32_t divisor = hmod->fixed_right_arg().value;
ASSERT(IsPowerOf2(divisor));
// Check if our assumption of a fixed right operand still holds.
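has_fixed_right_arg()/fixed_right_arg_value() collapse into a single fixed_right_arg() accessor returning a Maybe-style value. A struct shape consistent with the field accesses above (the constructors are assumed):

    template <class T>
    struct Maybe {
      Maybe() : has_value(false) {}
      explicit Maybe(T t) : has_value(true), value(t) {}
      bool has_value;
      T value;
    };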
@@ -1619,13 +1553,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->value());
@@ -1652,8 +1579,11 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi return the object.
+ __ JumpIfSmi(input, &done, Label::kNear);
+ }
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
@@ -1757,10 +1687,11 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
- __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ __ leal(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- __ lea(ToRegister(instr->result()), address);
+ __ leal(ToRegister(instr->result()), address);
}
} else {
if (right->IsConstantOperand()) {
@@ -1901,10 +1832,12 @@ int LCodeGen::GetNextEmittedBlock() const {
}
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
+
int next_block = GetNextEmittedBlock();
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
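EmitBranch now takes the control instruction itself, so destination lookup happens in one place instead of in every Do*AndBranch. A reduced model of the dispatch; chunk_ and the actual jump emission are elided, as in the hunk above:

    template <class InstrType>
    void EmitBranchModel(InstrType* instr, Condition cc) {
      int left_block = instr->TrueDestination(chunk_);
      int right_block = instr->FalseDestination(chunk_);
      if (right_block == left_block) {
        EmitGoto(left_block);  // identical successors: one unconditional jump
      } else {
        // j(cc) to left_block, then fall through or jmp to right_block.
      }
    }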
@@ -1927,26 +1860,23 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ testl(reg, reg);
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ testq(reg, reg);
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
@@ -1954,43 +1884,52 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (type.IsBoolean()) {
ASSERT(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
} else if (type.IsSmi()) {
ASSERT(!info()->IsStub());
__ SmiCompare(reg, Smi::FromInt(0));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(instr, not_equal);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ EmitBranch(instr, not_equal);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ EmitBranch(instr, not_equal);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// true -> true.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
+ __ j(equal, instr->TrueLabel(chunk_));
// false -> false.
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Cmp(reg, Smi::FromInt(0));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ j(equal, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
@@ -2005,14 +1944,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
+ __ j(not_zero, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
+ __ j(above_equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2021,8 +1960,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
+ __ j(not_zero, instr->TrueLabel(chunk_));
+ __ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
@@ -2033,13 +1972,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(not_equal, &not_heap_number, Label::kNear);
__ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
+ __ j(zero, instr->FalseLabel(chunk_));
+ __ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
}
}
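The guard around the final deopt is the point of the Types::Generic() change: once every ToBoolean case has an inline handler, nothing unexpected can reach the end of the tagged path, and deoptimizing there would never learn anything new. The rule in isolation:

    // Sketch of the decision the hunk encodes.
    static bool NeedsFinalDeopt(const ToBooleanStub::Types& expected) {
      // Generic means every input type is already covered inline above.
      return !expected.IsGeneric();
    }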
@@ -2088,24 +2030,21 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ __ j(parity_even, instr->FalseLabel(chunk_));
} else {
int32_t value;
if (right->IsConstantOperand()) {
@@ -2144,15 +2083,13 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
}
}
- EmitBranch(true_block, false_block, cc);
+ EmitBranch(instr, cc);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->right()->IsConstantOperand()) {
Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
@@ -2161,17 +2098,15 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register right = ToRegister(instr->right());
__ cmpq(left, right);
}
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
__ cmpq(left, Immediate(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2203,21 +2138,21 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ Condition true_cond = EmitIsObject(
+ reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- Condition true_cond = EmitIsObject(reg, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+
Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
return cond;
@@ -2228,20 +2163,18 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond = EmitIsString(reg, temp, false_label);
+ Condition true_cond = EmitIsString(
+ reg, temp, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Condition is_smi;
if (instr->value()->IsRegister()) {
Register input = ToRegister(instr->value());
@@ -2250,7 +2183,7 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->value());
is_smi = masm()->CheckSmi(input);
}
- EmitBranch(true_block, false_block, is_smi);
+ EmitBranch(instr, is_smi);
}
@@ -2258,21 +2191,18 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
}
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2280,7 +2210,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Condition condition = TokenToCondition(op, false);
__ testq(rax, rax);
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -2307,15 +2237,12 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
@@ -2335,12 +2262,9 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2418,25 +2342,18 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2490,9 +2407,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = ToRegister(instr->temp());
__ movq(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
+ __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
__ cmpq(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
@@ -2661,7 +2577,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<Cell> cell_handle = instr->hydrogen()->cell();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2671,14 +2587,14 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// We have a temp because CompareRoot might clobber kScratchRegister.
Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
// Store the value.
__ movq(Operand(cell, 0), value);
} else {
// Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
__ movq(Operand(kScratchRegister, 0), value);
}
// Cells are always rescanned, so no write barrier here.
@@ -2733,9 +2649,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ movq(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->temp());
__ RecordWriteContextSlot(context,
@@ -3939,11 +3855,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- if (FLAG_optimize_constructed_arrays) {
- // No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
- }
+  // No cell in rbx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_value);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3952,22 +3866,42 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(FLAG_optimize_constructed_arrays);
__ Set(rax, instr->arity());
__ Move(rbx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
- bool disable_allocation_sites =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
- ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need a change here:
+      // look at the first argument.
+ __ movq(rcx, Operand(rsp, 0));
+ __ testq(rcx, rcx);
+ __ j(zero, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
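The single-argument constructor path now distinguishes new Array(0), which may stay packed, from new Array(n), which begins life with n holes. The kind selection pulled out as a scalar function; IsFastPackedElementsKind and GetHoleyElementsKind are the helpers the hunk itself uses:

    static ElementsKind KindForSingleArgument(ElementsKind kind,
                                              int64_t length) {
      if (IsFastPackedElementsKind(kind) && length != 0) {
        return GetHoleyElementsKind(kind);  // n holes: switch to holey kind
      }
      return kind;  // length 0, or kind already holey: keep as-is
    }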
@@ -4025,9 +3959,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
if (!transition.is_null()) {
- if (transition->CanBeDeprecated()) {
- transition_maps_.Add(transition, info()->zone());
- }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
@@ -4046,9 +3977,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register write_register = object;
if (!access.IsInobject()) {
@@ -4287,9 +4218,9 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
ASSERT(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
__ lea(key_reg, operand);
@@ -4955,9 +4886,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ Condition cc = masm()->CheckSmi(ToRegister(input));
+ DeoptimizeIf(cc, instr->environment());
+ }
}
@@ -5100,11 +5033,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), instr);
@@ -5113,6 +5042,94 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
+ TAG_OBJECT);
+
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(scratch, constructor);
+ __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+ Immediate(instance_size >> kPointerSizeLog2));
+ __ Assert(equal, "Unexpected instance size");
+ __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+ Immediate(initial_map->pre_allocated_property_fields()));
+ __ Assert(equal, "Unexpected pre-allocated property fields count");
+ __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+ Immediate(initial_map->unused_property_fields()));
+ __ Assert(equal, "Unexpected unused property fields count");
+ __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+ Immediate(initial_map->inobject_properties()));
+ __ Assert(equal, "Unexpected in-object property fields count");
+ }
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ movq(FieldOperand(result, JSObject::kMapOffset), map);
+ __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ if (initial_map->inobject_properties() != 0) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ movq(FieldOperand(result, property_offset), scratch);
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+ int instance_size = initial_map->instance_size();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ PushSafepointRegistersScope scope(this);
+ __ Push(Smi::FromInt(instance_size));
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
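DoAllocateObject and its deferred twin follow the backend's standard allocation shape: an inline bump allocation that branches to deferred code on failure, where the runtime is called and the result written back into the safepoint register slot. Schematically, with every helper name below hypothetical rather than V8 API:

    static void* AllocateObjectSketch(size_t instance_size) {
      void* obj = TryInlineNewSpaceAllocate(instance_size);   // hypothetical
      if (obj == NULL) {
        obj = RuntimeAllocateInNewSpace(instance_size);       // hypothetical
      }
      InitializeMapAndInObjectFields(obj);                    // hypothetical
      return obj;
    }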
@@ -5288,15 +5305,12 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_), input, instr->type_literal());
if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -5379,11 +5393,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -5535,15 +5547,15 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ // Normally we record the first unknown OSR value as the entrypoint to the OSR
+ // code, but if there were none, record the entrypoint here.
+ if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 07a948c113..c89ec1fd0e 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -57,8 +57,6 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -81,7 +79,6 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
- // TODO(svenpanne) Use this consistently.
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -130,6 +127,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -141,10 +139,7 @@ class LCodeGen BASE_EMBEDDED {
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -255,10 +250,7 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
+ bool is_uint32);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -290,7 +282,8 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
XMMRegister result,
@@ -319,7 +312,8 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
@@ -362,8 +356,6 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index fd74e0aacd..aed4f36647 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index cb0659d24d..95a44f0384 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
@@ -43,31 +43,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
@@ -354,8 +329,7 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
+ ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -456,7 +430,7 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -937,7 +911,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -948,13 +922,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
+ bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
+ needs_arguments_object_materialization = true;
op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
@@ -966,6 +942,21 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ if (needs_arguments_object_materialization) {
+ HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+ ? graph()->GetArgumentsObject()
+ : hydrogen_env->entry()->arguments_object();
+ ASSERT(arguments->IsLinked());
+ for (int i = 1; i < arguments->arguments_count(); ++i) {
+ HValue* value = arguments->arguments_values()->at(i);
+ ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+ LOperand* op = UseAny(value);
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -997,10 +988,13 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
- // deoptimization environment.
+ // deoptimization environment. If the instruction is generic no
+ // environment is needed since all cases are handled.
+ ToBooleanStub::Types expected = instr->expected_input_types();
Representation rep = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+ !expected.IsGeneric()) {
return AssignEnvironment(result);
}
return result;
@@ -1270,7 +1264,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- ASSERT(FLAG_optimize_constructed_arrays);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(constructor);
@@ -1368,19 +1361,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
@@ -1452,7 +1432,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->has_fixed_right_arg()) {
+ } else if (instr->fixed_right_arg().has_value) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegisterAtStart(right),
NULL);
@@ -1738,13 +1718,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -1932,7 +1905,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
@@ -2353,6 +2326,13 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
+ LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2525,8 +2505,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
undefined,
instr->inlining_kind(),
instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 1121af50e1..a7530be14e 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -50,6 +50,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -98,7 +99,6 @@ class LCodeGen;
V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
- V(FixedArrayBaseLength) \
V(MapEnumLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -489,17 +489,44 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
virtual bool IsControl() const { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
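
The refactored LControlInstruction resolves its branch targets lazily: TrueLabel()/FalseLabel() look up the assembly label through the chunk once and cache the raw pointer for later calls. A minimal standalone sketch of that memoization pattern (Label and Chunk here are hypothetical stand-ins, not the V8 types):

    #include <cstdio>

    struct Label { int id; };

    struct Chunk {
      Label labels[2];
      Label* GetAssemblyLabel(int i) { return &labels[i]; }  // pretend: costly lookup
    };

    class ControlInstruction {
     public:
      ControlInstruction() : false_label_(nullptr), true_label_(nullptr) {}
      Label* TrueLabel(Chunk* chunk) {
        if (true_label_ == nullptr) true_label_ = chunk->GetAssemblyLabel(0);
        return true_label_;  // cached after the first call
      }
      Label* FalseLabel(Chunk* chunk) {
        if (false_label_ == nullptr) false_label_ = chunk->GetAssemblyLabel(1);
        return false_label_;
      }
     private:
      Label* false_label_;
      Label* true_label_;
    };

    int main() {
      Chunk chunk = {{{10}, {20}}};
      ControlInstruction instr;
      std::printf("true=%d false=%d\n", instr.TrueLabel(&chunk)->id,
                  instr.FalseLabel(&chunk)->id);
    }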
@@ -1190,7 +1217,7 @@ class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
+class LCmpMapAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1201,29 +1228,7 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
@@ -2359,6 +2364,20 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LAllocateObject(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2463,26 +2482,10 @@ class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
- LOsrEntry();
+ LOsrEntry() {}
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2590,7 +2593,6 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 46e2c694e8..e5bee67bb4 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -27,10 +27,11 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
@@ -645,8 +646,8 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
+ // -- rsp[0] : return address
+ // -- rsp[8] : argument num_arguments - 1
// ...
// -- rsp[8 * num_arguments] : argument 0 (receiver)
// -----------------------------------
@@ -697,6 +698,8 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space,
void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset) {
@@ -737,9 +740,29 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ cmpb(Operand(rax, 0), Immediate(0));
+ j(zero, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
+ movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
// Call the api function!
movq(rax, reinterpret_cast<int64_t>(function_address),
RelocInfo::EXTERNAL_REFERENCE);
+
+ bind(&end_profiler_check);
+
+  // Call the api function (directly or through the profiler thunk).
call(rax);
if (FLAG_log_timer_events) {
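
Rendered as C++, the profiler check emitted above amounts to the following (the function and flag names are descriptive stand-ins, not V8 API): when the profiler's one-byte flag is clear, the API function is called directly; otherwise control goes through a thunk that receives the real function address as its last argument so the call can be logged.

    #include <cstdio>

    using ApiFunction = void (*)();

    void RealGetter() { std::puts("getter body"); }

    // Stand-in for the thunk: logs, then forwards to the real function.
    void ProfilerThunk(ApiFunction target) {
      std::puts("profiler: logging api call");
      target();
    }

    void CallApiFunction(ApiFunction function, const bool* is_profiling_flag) {
      if (!*is_profiling_flag) {   // cmpb(Operand(rax, 0), Immediate(0))
        function();                // profiler disabled: direct call
      } else {
        ProfilerThunk(function);   // profiler enabled: call through the thunk
      }
    }

    int main() {
      bool profiling = false;
      CallApiFunction(&RealGetter, &profiling);
      profiling = true;
      CallApiFunction(&RealGetter, &profiling);
    }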
@@ -2286,6 +2309,32 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
}
+template<class T>
+static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
+ T operand_or_register,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+ masm->cmpb(operand_or_register, Immediate(kInternalizedTag));
+ masm->j(less, not_unique_name, distance);
+ masm->cmpb(operand_or_register, Immediate(SYMBOL_TYPE));
+ masm->j(greater, not_unique_name, distance);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
+}
+
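
The helper above exists so the Operand and Register overloads share one body instead of duplicating the two-comparison range check. A standalone sketch of both the sharing pattern and the check itself (the instance-type constants are illustrative placeholders, not V8's real values):

    #include <cstdio>

    constexpr int kInternalizedTag = 0;  // assumed placeholder values
    constexpr int kSymbolType = 5;

    struct Register { int type; };
    struct Operand  { int type; };

    // One template carries the shared logic, as JumpIfNotUniqueNameHelper does.
    template <class T>
    bool IsUniqueNameHelper(const T& operand_or_register) {
      int t = operand_or_register.type;
      if (t < kInternalizedTag) return false;  // j(less, not_unique_name)
      if (t > kSymbolType) return false;       // j(greater, not_unique_name)
      return true;
    }

    bool IsUniqueName(Register reg) { return IsUniqueNameHelper<Register>(reg); }
    bool IsUniqueName(Operand op)   { return IsUniqueNameHelper<Operand>(op); }

    int main() {
      std::printf("%d %d %d\n",
                  IsUniqueName(Register{3}),   // in range: 1
                  IsUniqueName(Operand{7}),    // above SYMBOL_TYPE: 0
                  IsUniqueName(Register{-1})); // below the tag: 0
    }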
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
@@ -2357,9 +2406,8 @@ void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ movq(result, cell, RelocInfo::CELL);
movq(result, Operand(result, 0));
} else {
Move(result, object);
@@ -2370,9 +2418,8 @@ void MacroAssembler::LoadHeapObject(Register result,
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ movq(kScratchRegister, cell, RelocInfo::CELL);
cmpq(reg, Operand(kScratchRegister, 0));
} else {
Cmp(reg, object);
@@ -2383,9 +2430,8 @@ void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ movq(kScratchRegister, cell, RelocInfo::CELL);
movq(kScratchRegister, Operand(kScratchRegister, 0));
push(kScratchRegister);
} else {
@@ -2394,13 +2440,12 @@ void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
}
-void MacroAssembler::LoadGlobalCell(Register dst,
- Handle<JSGlobalPropertyCell> cell) {
+void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
if (dst.is(rax)) {
AllowDeferredHandleDereference embedding_raw_address;
- load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ load_rax(cell.location(), RelocInfo::CELL);
} else {
- movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(dst, cell, RelocInfo::CELL);
movq(dst, Operand(dst, 0));
}
}
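
The mechanical rename from JSGlobalPropertyCell/GLOBAL_PROPERTY_CELL to Cell/CELL leaves the underlying trick unchanged, and it is worth recalling: generated code cannot embed a direct pointer to an object the GC may move, so it embeds the address of a cell whose single slot the GC keeps current, then dereferences it at run time (the extra movq through Operand(result, 0)). A simplified standalone model, not the real heap types:

    #include <cstdio>

    struct HeapObject { int payload; };
    struct Cell { HeapObject* value; };  // the GC rewrites 'value' on relocation

    HeapObject* LoadThroughCell(Cell* cell) {
      return cell->value;  // movq(result, Operand(result, 0))
    }

    int main() {
      HeapObject a{42};
      Cell cell{&a};
      std::printf("%d\n", LoadThroughCell(&cell)->payload);
      HeapObject b = a;  // "GC" moves the object...
      cell.value = &b;   // ...and updates the cell slot
      std::printf("%d\n", LoadThroughCell(&cell)->payload);
    }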
@@ -2643,7 +2688,8 @@ void MacroAssembler::JumpToHandlerEntry() {
// rax = exception, rdi = code object, rdx = state.
movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+ movq(rdx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
@@ -4171,12 +4217,12 @@ void MacroAssembler::CopyBytes(Register destination,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
movq(scratch, length);
- shrl(length, Immediate(3));
+ shrl(length, Immediate(kPointerSizeLog2));
repmovsq();
// Move remaining bytes of length.
- andl(scratch, Immediate(0x7));
- movq(length, Operand(source, scratch, times_1, -8));
- movq(Operand(destination, scratch, times_1, -8), length);
+ andl(scratch, Immediate(kPointerSize - 1));
+ movq(length, Operand(source, scratch, times_1, -kPointerSize));
+ movq(Operand(destination, scratch, times_1, -kPointerSize), length);
addq(destination, scratch);
if (min_length <= kLongStringLimit) {
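
The CopyBytes change swaps the literals 3 and 0x7 for kPointerSizeLog2 and kPointerSize - 1; on x64 the values are identical, as this quick standalone check shows:

    #include <cstdio>

    constexpr int kPointerSize = 8;      // x64
    constexpr int kPointerSizeLog2 = 3;

    int main() {
      int length = 29;
      int words = length >> kPointerSizeLog2;   // was: shrl(length, Immediate(3))
      int tail  = length & (kPointerSize - 1);  // was: andl(scratch, Immediate(0x7))
      std::printf("%d words + %d tail bytes\n", words, tail);  // 3 words + 5 bytes
    }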
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index c10cbc65fe..124153b52d 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -758,6 +758,12 @@ class MacroAssembler: public Assembler {
Label* on_fail,
Label::Distance near_jump = Label::kFar);
+  // Checks whether the given register or operand contains a unique name.
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+ void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -810,7 +816,7 @@ class MacroAssembler: public Assembler {
}
// Load a global cell into a register.
- void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+ void LoadGlobalCell(Register dst, Handle<Cell> cell);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
@@ -1239,12 +1245,14 @@ class MacroAssembler: public Assembler {
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
void CallApiFunctionAndReturn(Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
int stack_space,
bool returns_handle,
int return_value_offset_from_rbp);
// Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
+ // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
// etc., not pushed. The argument count assumes all arguments are word sized.
// The number of slots reserved for arguments depends on platform. On Windows
// stack slots are reserved for the arguments passed in registers. On other
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index efb2a65a5f..106ffb76da 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
+#include "cpu-profiler.h"
#include "serialize.h"
#include "unicode.h"
#include "log.h"
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 06d8f7108b..4b3ee400f3 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "ic-inl.h"
#include "codegen.h"
@@ -53,7 +53,7 @@ static void ProbeTable(Isolate* isolate,
ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
ScaleFactor scale_factor = times_2;
- ASSERT_EQ(24, sizeof(StubCache::Entry));
+ ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
@@ -171,8 +171,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
USE(extra2); // The register extra2 is not used on the X64 platform.
  USE(extra3);  // The register extra3 is not used on the X64 platform.
// Make sure that code is valid. The multiplying code relies on the
- // entry size being 24.
- ASSERT(sizeof(Entry) == 24);
+ // entry size being 3 * kPointerSize.
+ ASSERT(sizeof(Entry) == 3 * kPointerSize);
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
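
Both asserts now state the entry size symbolically: a stub-cache entry is three pointer-sized fields, which is 24 bytes on x64. A standalone mirror of that shape (field names assumed, not the real StubCache::Entry):

    #include <cstddef>

    constexpr std::size_t kPointerSize = sizeof(void*);  // 8 on x64

    struct Entry {  // same shape as a three-pointer stub-cache entry
      void* key;
      void* value;
      void* map;
    };

    static_assert(sizeof(Entry) == 3 * kPointerSize, "entry is three pointers");

    int main() { return 0; }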
@@ -423,10 +423,11 @@ static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
+ // -- rsp[0] : return address.
+ // -- rsp[8] : last fast api call extra argument.
// -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra argument.
+ // -- rsp[kFastApiCallArguments * 8] : first fast api call extra
+ // argument.
// -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
// frame.
// -----------------------------------
@@ -491,11 +492,14 @@ static void GenerateFastApiCall(MacroAssembler* masm,
#if defined(__MINGW64__)
Register arguments_arg = rcx;
+ Register callback_arg = rdx;
#elif defined(_WIN64)
// Win64 uses first register--rcx--for returned value.
Register arguments_arg = returns_handle ? rdx : rcx;
+ Register callback_arg = returns_handle ? r8 : rdx;
#else
Register arguments_arg = rdi;
+ Register callback_arg = rsi;
#endif
// Allocate the v8::Arguments structure in the arguments' space since
@@ -514,7 +518,13 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeInvocationCallback)
+ : FUNCTION_ADDR(&InvokeFunctionCallback);
+
__ CallApiFunctionAndReturn(function_address,
+ thunk_address,
+ callback_arg,
argc + kFastApiCallArguments + 1,
returns_handle,
kFastApiCallArguments + 1);
@@ -737,11 +747,11 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<Name> name,
Register scratch,
Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
+ Handle<PropertyCell> cell =
GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+ __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
masm->isolate()->factory()->the_hole_value());
__ j(not_equal, miss);
}
@@ -817,7 +827,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register storage_reg = name_reg;
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (details.type() == CONSTANT_FUNCTION) {
+ Handle<HeapObject> constant(
+ HeapObject::cast(descriptors->GetValue(descriptor)));
+ __ LoadHeapObject(scratch1, constant);
+ __ cmpq(value_reg, scratch1);
+ __ j(not_equal, miss_restore_name);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_restore_name);
@@ -844,7 +860,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (object->map()->unused_property_fields() == 0) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch1); // Return address.
@@ -873,6 +890,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
+ if (details.type() == CONSTANT_FUNCTION) {
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
+ return;
+ }
+
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
@@ -1293,8 +1316,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch2().is(reg));
- __ pop(scratch2()); // Get return address to place it below.
+ ASSERT(!scratch4().is(reg));
+ __ pop(scratch4()); // Get return address to place it below.
__ push(receiver()); // receiver
__ push(reg); // holder
@@ -1318,20 +1341,23 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
!CallbackTable::ReturnsVoid(isolate(), getter_address);
#if defined(__MINGW64__)
+ Register getter_arg = r8;
Register accessor_info_arg = rdx;
Register name_arg = rcx;
#elif defined(_WIN64)
// Win64 uses first register--rcx--for returned value.
+ Register getter_arg = returns_handle ? r9 : r8;
Register accessor_info_arg = returns_handle ? r8 : rdx;
Register name_arg = returns_handle ? rdx : rcx;
#else
+ Register getter_arg = rdx;
Register accessor_info_arg = rsi;
Register name_arg = rdi;
#endif
- ASSERT(!name_arg.is(scratch2()));
+ ASSERT(!name_arg.is(scratch4()));
__ movq(name_arg, rsp);
- __ push(scratch2()); // Restore return address.
+ __ push(scratch4()); // Restore return address.
// v8::Arguments::values_ and handler for name.
const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -1350,7 +1376,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
+ Address thunk_address = returns_handle
+ ? FUNCTION_ADDR(&InvokeAccessorGetter)
+ : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+
__ CallApiFunctionAndReturn(getter_address,
+ thunk_address,
+ getter_arg,
kStackSpace,
returns_handle,
5);
@@ -1485,12 +1517,12 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
+ __ movq(rdi, FieldOperand(rdi, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
@@ -1581,12 +1613,59 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+  // Check that the function is still the Array function.
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ // Get the receiver from the stack.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rdx, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+ Handle<Cell> kind_feedback_cell =
+ isolate()->factory()->NewCell(kind);
+ __ movq(rax, Immediate(argc));
+ __ Move(rbx, kind_feedback_cell);
+ __ Move(rdi, function);
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1827,16 +1906,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1908,16 +1988,17 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1988,16 +2069,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -2068,16 +2150,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -2139,16 +2222,17 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// TODO(872): implement this.
return Handle<Code>::null();
}
@@ -2157,9 +2241,10 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -2255,7 +2340,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2263,7 +2348,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
@@ -2445,8 +2530,9 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<JSFunction> function) {
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, Handle<String>::cast(name));
+ Handle<PropertyCell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT_FUNCTION);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
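
The comment spells out the protocol: a custom call generator may decline by returning a null handle, in which case the caller falls through to the generic compiler path. A standalone sketch of that convention (Handle reduced to a plain pointer):

    #include <cstdio>

    struct Code { const char* name; };

    // Returns nullptr to signal "bail out", like Handle<Code>::null().
    Code* CompileCustomCall(bool supported) {
      if (!supported) return nullptr;
      static Code custom{"custom stub"};
      return &custom;
    }

    const char* Compile(bool custom_supported) {
      if (Code* code = CompileCustomCall(custom_supported)) return code->name;
      return "generic stub";  // the regular compiler code below
    }

    int main() {
      std::printf("%s\n", Compile(true));   // custom stub
      std::printf("%s\n", Compile(false));  // generic stub
    }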
@@ -2525,7 +2611,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
// ----------- S t a t e -------------
@@ -2540,7 +2626,8 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name));
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2708,7 +2795,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<Code> StoreStubCompiler::CompileStoreGlobal(
Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name) {
Label miss;
@@ -2720,7 +2807,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Compute the cell operand to use.
__ Move(scratch1(), cell);
Operand cell_operand =
- FieldOperand(scratch1(), JSGlobalPropertyCell::kValueOffset);
+ FieldOperand(scratch1(), PropertyCell::kValueOffset);
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
@@ -2888,7 +2975,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
Label success, miss;
@@ -2902,7 +2989,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// Get the value from the cell.
__ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -2996,7 +3083,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
@@ -3004,7 +3091,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}
@@ -3039,10 +3126,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, miss_force_generic;
@@ -3199,10 +3286,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ bind(&slow);
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index e312b20899..49e7626f74 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -40,7 +40,6 @@ namespace internal {
inline void* Zone::New(int size) {
- ASSERT(scope_nesting_ > 0);
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -75,7 +74,7 @@ T* Zone::NewArray(int length) {
bool Zone::excess_allocation() {
- return segment_bytes_allocated_ > zone_excess_limit_;
+ return segment_bytes_allocated_ > kExcessLimit;
}
@@ -110,17 +109,6 @@ void* ZoneList<T>::operator new(size_t size, Zone* zone) {
}
-ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
- : zone_(zone), mode_(mode) {
- zone_->scope_nesting_++;
-}
-
-
-bool ZoneScope::ShouldDeleteOnExit() {
- return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
} } // namespace v8::internal
#endif // V8_ZONE_INL_H_
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 51b8113a0d..2a0a0e2846 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -68,39 +68,20 @@ class Segment {
Zone::Zone(Isolate* isolate)
- : zone_excess_limit_(256 * MB),
+ : allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
- scope_nesting_(0),
segment_head_(NULL),
isolate_(isolate) {
}
-unsigned Zone::allocation_size_ = 0;
-ZoneScope::~ZoneScope() {
- if (ShouldDeleteOnExit()) zone_->DeleteAll();
- zone_->scope_nesting_--;
-}
+Zone::~Zone() {
+ DeleteAll();
+ DeleteKeptSegment();
-// Creates a new segment, sets it size, and pushes it to the front
-// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(int size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- adjust_segment_bytes_allocated(size);
- if (result != NULL) {
- result->Initialize(segment_head_, size);
- segment_head_ = result;
- }
- return result;
-}
-
-
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, int size) {
- adjust_segment_bytes_allocated(-size);
- Malloced::Delete(segment);
+ ASSERT(segment_bytes_allocated_ == 0);
}
@@ -118,8 +99,7 @@ void Zone::DeleteAll() {
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
- Segment* current = segment_head_;
- while (current != NULL) {
+ for (Segment* current = segment_head_; current != NULL; ) {
Segment* next = current->next();
if (current == keep) {
// Unlink the segment we wish to keep from the list.
@@ -157,10 +137,43 @@ void Zone::DeleteAll() {
void Zone::DeleteKeptSegment() {
+#ifdef DEBUG
+ // Constant byte value used for zapping dead memory in debug mode.
+ static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+ ASSERT(segment_head_ == NULL || segment_head_->next() == NULL);
if (segment_head_ != NULL) {
- DeleteSegment(segment_head_, segment_head_->size());
+ int size = segment_head_->size();
+#ifdef DEBUG
+ // Zap the entire kept segment (including the header).
+ memset(segment_head_, kZapDeadByte, size);
+#endif
+ DeleteSegment(segment_head_, size);
segment_head_ = NULL;
}
+
+ ASSERT(segment_bytes_allocated_ == 0);
+}
+
+
+// Creates a new segment, sets its size, and pushes it to the front
+// of the segment chain. Returns the new segment.
+Segment* Zone::NewSegment(int size) {
+ Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+ adjust_segment_bytes_allocated(size);
+ if (result != NULL) {
+ result->Initialize(segment_head_, size);
+ segment_head_ = result;
+ }
+ return result;
+}
+
+
+// Deletes the given segment. Does not touch the segment chain.
+void Zone::DeleteSegment(Segment* segment, int size) {
+ adjust_segment_bytes_allocated(-size);
+ Malloced::Delete(segment);
}
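
One detail added above deserves a note: in debug builds the kept segment is overwritten with 0xcd before it is freed, so stale pointers into the zone fail loudly instead of silently reading old data. A minimal standalone version of that zap-on-free pattern:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static const unsigned char kZapDeadByte = 0xcd;

    void DeleteZapped(void* segment, int size) {
    #ifdef DEBUG
      std::memset(segment, kZapDeadByte, size);  // zap header and payload alike
    #endif
      std::free(segment);
    }

    int main() {
      void* segment = std::malloc(64);
      if (segment != nullptr) DeleteZapped(segment, 64);
      std::puts("kept segment released");
    }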
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index 01e887e779..a12ed79312 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -39,13 +39,6 @@ namespace v8 {
namespace internal {
-// Zone scopes are in one of two modes. Either they delete the zone
-// on exit or they do not.
-enum ZoneScopeMode {
- DELETE_ON_EXIT,
- DONT_DELETE_ON_EXIT
-};
-
class Segment;
class Isolate;
@@ -65,7 +58,7 @@ class Isolate;
class Zone {
public:
explicit Zone(Isolate* isolate);
- ~Zone() { DeleteKeptSegment(); }
+ ~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
inline void* New(int size);
@@ -77,7 +70,8 @@ class Zone {
// small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
void DeleteAll();
- // Deletes the last small segment kept around by DeleteAll().
+ // Deletes the last small segment kept around by DeleteAll(). You
+ // may no longer allocate in the Zone after a call to this method.
void DeleteKeptSegment();
// Returns true if more memory has been allocated in zones than
@@ -86,13 +80,12 @@ class Zone {
inline void adjust_segment_bytes_allocated(int delta);
- inline Isolate* isolate() { return isolate_; }
+ inline unsigned allocation_size() { return allocation_size_; }
- static unsigned allocation_size_;
+ inline Isolate* isolate() { return isolate_; }
private:
friend class Isolate;
- friend class ZoneScope;
// All pointers returned from New() have this alignment. In addition, if the
// object being allocated has a size that is divisible by 8 then its alignment
@@ -109,7 +102,10 @@ class Zone {
static const int kMaximumKeptSegmentSize = 64 * KB;
// Report zone excess when allocation exceeds this limit.
- int zone_excess_limit_;
+ static const int kExcessLimit = 256 * MB;
+
+ // The number of bytes allocated in this zone so far.
+ unsigned allocation_size_;
// The number of bytes allocated in segments. Note that this number
// includes memory allocated from the OS but not yet allocated from
@@ -124,10 +120,10 @@ class Zone {
  // Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
- Segment* NewSegment(int size);
+ INLINE(Segment* NewSegment(int size));
// Deletes the given segment. Does not touch the segment chain.
- void DeleteSegment(Segment* segment, int size);
+ INLINE(void DeleteSegment(Segment* segment, int size));
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
@@ -135,8 +131,6 @@ class Zone {
Address position_;
Address limit_;
- int scope_nesting_;
-
Segment* segment_head_;
Isolate* isolate_;
};
@@ -162,6 +156,20 @@ class ZoneObject {
};
+// The ZoneScope is used to automatically call DeleteAll() on a
+// Zone when the ZoneScope is destroyed (i.e., goes out of scope).
+struct ZoneScope {
+ public:
+ explicit ZoneScope(Zone* zone) : zone_(zone) { }
+ ~ZoneScope() { zone_->DeleteAll(); }
+
+ Zone* zone() { return zone_; }
+
+ private:
+ Zone* zone_;
+};
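
A hypothetical usage sketch of the new RAII ZoneScope (the Zone here is a toy stand-in, not the real class): everything allocated while the scope is alive is released in one DeleteAll() when it leaves scope.

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    class Zone {  // toy stand-in for v8::internal::Zone
     public:
      void* New(int size) {
        blocks_.push_back(std::malloc(size));
        return blocks_.back();
      }
      void DeleteAll() {
        for (void* block : blocks_) std::free(block);
        std::printf("freed %d blocks\n", static_cast<int>(blocks_.size()));
        blocks_.clear();
      }
     private:
      std::vector<void*> blocks_;
    };

    struct ZoneScope {
      explicit ZoneScope(Zone* zone) : zone_(zone) {}
      ~ZoneScope() { zone_->DeleteAll(); }
      Zone* zone_;
    };

    int main() {
      Zone zone;
      {
        ZoneScope scope(&zone);
        zone.New(16);
        zone.New(32);
      }  // ~ZoneScope runs here and calls DeleteAll()
    }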
+
+
// The ZoneAllocationPolicy is used to specialize generic data
// structures to allocate themselves and their elements in the Zone.
struct ZoneAllocationPolicy {
@@ -229,31 +237,6 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
};
-// ZoneScopes keep track of the current parsing and compilation
-// nesting and cleans up generated ASTs in the Zone when exiting the
-// outer-most scope.
-class ZoneScope BASE_EMBEDDED {
- public:
- INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
-
- virtual ~ZoneScope();
-
- inline bool ShouldDeleteOnExit();
-
- // For ZoneScopes that do not delete on exit by default, call this
- // method to request deletion on exit.
- void DeleteOnExit() {
- mode_ = DELETE_ON_EXIT;
- }
-
- inline static int nesting();
-
- private:
- Zone* zone_;
- ZoneScopeMode mode_;
-};
-
-
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.