-rw-r--r--  deps/v8/ChangeLog  73
-rw-r--r--  deps/v8/SConstruct  14
-rw-r--r--  deps/v8/include/v8-profiler.h  32
-rw-r--r--  deps/v8/include/v8.h  40
-rw-r--r--  deps/v8/node_cygwin_patch.diff  918
-rwxr-xr-x  deps/v8/src/SConscript  5
-rwxr-xr-x  deps/v8/src/SConscript.orig  324
-rw-r--r--  deps/v8/src/accessors.h  6
-rw-r--r--  deps/v8/src/api.cc  55
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc  76
-rw-r--r--  deps/v8/src/arm/assembler-arm.h  15
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc  29
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc  4688
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h  491
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc  5297
-rw-r--r--  deps/v8/src/arm/codegen-arm.h  522
-rw-r--r--  deps/v8/src/arm/constants-arm.h  27
-rw-r--r--  deps/v8/src/arm/debug-arm.cc  97
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc  22
-rw-r--r--  deps/v8/src/arm/frames-arm.cc  73
-rw-r--r--  deps/v8/src/arm/frames-arm.h  5
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc  814
-rw-r--r--  deps/v8/src/arm/ic-arm.cc  127
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  263
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h  31
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc  18
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h  16
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc  89
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc  141
-rw-r--r--  deps/v8/src/array.js  15
-rw-r--r--  deps/v8/src/ast-inl.h  3
-rw-r--r--  deps/v8/src/ast.cc  627
-rw-r--r--  deps/v8/src/ast.h  371
-rw-r--r--  deps/v8/src/bootstrapper.cc  11
-rw-r--r--  deps/v8/src/builtins.cc  115
-rw-r--r--  deps/v8/src/builtins.h  7
-rw-r--r--  deps/v8/src/char-predicates-inl.h  12
-rw-r--r--  deps/v8/src/circular-queue.cc  5
-rw-r--r--  deps/v8/src/code-stubs.h  622
-rw-r--r--  deps/v8/src/codegen.cc  27
-rw-r--r--  deps/v8/src/codegen.h  619
-rw-r--r--  deps/v8/src/compilation-cache.cc  45
-rw-r--r--  deps/v8/src/compilation-cache.h  4
-rwxr-xr-x  deps/v8/src/compiler.cc  78
-rw-r--r--  deps/v8/src/contexts.h  8
-rw-r--r--  deps/v8/src/conversions.cc  9
-rw-r--r--  deps/v8/src/conversions.h  6
-rw-r--r--  deps/v8/src/cpu-profiler.cc  18
-rw-r--r--  deps/v8/src/d8.cc  2
-rw-r--r--  deps/v8/src/data-flow.cc  278
-rw-r--r--  deps/v8/src/data-flow.h  76
-rw-r--r--  deps/v8/src/date.js  16
-rw-r--r--  deps/v8/src/dateparser.h  6
-rw-r--r--  deps/v8/src/debug.cc  16
-rw-r--r--  deps/v8/src/debug.h  16
-rw-r--r--  deps/v8/src/disassembler.cc  7
-rw-r--r--  deps/v8/src/execution.cc  2
-rw-r--r--  deps/v8/src/flag-definitions.h  2
-rw-r--r--  deps/v8/src/flags.h  2
-rw-r--r--  deps/v8/src/flow-graph.cc  763
-rw-r--r--  deps/v8/src/flow-graph.h  180
-rw-r--r--  deps/v8/src/frames-inl.h  15
-rw-r--r--  deps/v8/src/frames.cc  215
-rw-r--r--  deps/v8/src/frames.h  102
-rw-r--r--  deps/v8/src/full-codegen.cc  764
-rw-r--r--  deps/v8/src/full-codegen.h  215
-rw-r--r--  deps/v8/src/func-name-inferrer.cc  14
-rw-r--r--  deps/v8/src/func-name-inferrer.h  54
-rw-r--r--  deps/v8/src/globals.h  12
-rw-r--r--  deps/v8/src/handles.cc  2
-rw-r--r--  deps/v8/src/heap-inl.h  3
-rw-r--r--  deps/v8/src/heap-profiler.cc  444
-rw-r--r--  deps/v8/src/heap-profiler.h  64
-rw-r--r--  deps/v8/src/heap.cc  209
-rw-r--r--  deps/v8/src/heap.h  249
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc  148
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc  4615
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h  376
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc  4805
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h  338
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc  87
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc  64
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc  1210
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc  56
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc  259
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h  43
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc  2
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc  27
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc  4
-rw-r--r--  deps/v8/src/ic-inl.h  4
-rw-r--r--  deps/v8/src/ic.cc  6
-rw-r--r--  deps/v8/src/ic.h  4
-rw-r--r--  deps/v8/src/json.js  16
-rw-r--r--  deps/v8/src/jsregexp.cc  17
-rw-r--r--  deps/v8/src/jump-target-heavy.h  10
-rw-r--r--  deps/v8/src/liveedit.cc  50
-rw-r--r--  deps/v8/src/log.cc  4
-rw-r--r--  deps/v8/src/macro-assembler.h  27
-rw-r--r--  deps/v8/src/macros.py  2
-rw-r--r--  deps/v8/src/mark-compact.cc  317
-rw-r--r--  deps/v8/src/mark-compact.h  46
-rw-r--r--  deps/v8/src/memory.h  4
-rw-r--r--  deps/v8/src/messages.js  2
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc  2
-rw-r--r--  deps/v8/src/mips/codegen-mips.h  3
-rw-r--r--  deps/v8/src/objects-debug.cc  35
-rw-r--r--  deps/v8/src/objects-inl.h  151
-rw-r--r--  deps/v8/src/objects-visiting.cc  4
-rw-r--r--  deps/v8/src/objects-visiting.h  24
-rw-r--r--  deps/v8/src/objects.cc  298
-rw-r--r--  deps/v8/src/objects.h  277
-rw-r--r--  deps/v8/src/parser.cc  575
-rw-r--r--  deps/v8/src/parser.h  112
-rw-r--r--  deps/v8/src/platform-cygwin.cc  858
-rw-r--r--  deps/v8/src/platform-freebsd.cc  5
-rw-r--r--  deps/v8/src/platform-openbsd.cc  5
-rw-r--r--  deps/v8/src/platform.h  14
-rw-r--r--  deps/v8/src/platform.h.orig  580
-rw-r--r--  deps/v8/src/prettyprinter.cc  65
-rw-r--r--  deps/v8/src/prettyprinter.h  4
-rw-r--r--  deps/v8/src/profile-generator-inl.h  32
-rw-r--r--  deps/v8/src/profile-generator.cc  95
-rw-r--r--  deps/v8/src/profile-generator.h  41
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h  2
-rw-r--r--  deps/v8/src/regexp.js  15
-rw-r--r--  deps/v8/src/rewriter.cc  64
-rw-r--r--  deps/v8/src/runtime.cc  427
-rw-r--r--  deps/v8/src/runtime.h  5
-rw-r--r--  deps/v8/src/runtime.js  5
-rwxr-xr-x  deps/v8/src/scanner.cc  154
-rw-r--r--  deps/v8/src/scanner.h  102
-rw-r--r--  deps/v8/src/serialize.cc  39
-rw-r--r--  deps/v8/src/serialize.h  4
-rw-r--r--  deps/v8/src/spaces-inl.h  34
-rw-r--r--  deps/v8/src/spaces.cc  276
-rw-r--r--  deps/v8/src/spaces.h  86
-rw-r--r--  deps/v8/src/stub-cache.cc  91
-rw-r--r--  deps/v8/src/stub-cache.h  230
-rw-r--r--  deps/v8/src/token.h  4
-rw-r--r--  deps/v8/src/top.cc  34
-rw-r--r--  deps/v8/src/top.h  9
-rw-r--r--  deps/v8/src/utils.h  213
-rw-r--r--  deps/v8/src/v8-counters.h  19
-rw-r--r--  deps/v8/src/v8.h  3
-rw-r--r--  deps/v8/src/v8natives.js  28
-rw-r--r--  deps/v8/src/v8threads.cc  22
-rw-r--r--  deps/v8/src/v8threads.h  2
-rw-r--r--  deps/v8/src/version.cc  4
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc  14
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc  4015
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h  389
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc  4249
-rw-r--r--  deps/v8/src/x64/codegen-x64.h  365
-rw-r--r--  deps/v8/src/x64/debug-x64.cc  92
-rw-r--r--  deps/v8/src/x64/frames-x64.cc  60
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc  942
-rw-r--r--  deps/v8/src/x64/ic-x64.cc  60
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc  198
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h  29
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc  2
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc  167
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc  4
-rw-r--r--  deps/v8/test/cctest/cctest.status  8
-rw-r--r--  deps/v8/test/cctest/test-api.cc  34
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc  54
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips.cc  2
-rw-r--r--  deps/v8/test/cctest/test-ast.cc  29
-rw-r--r--  deps/v8/test/cctest/test-debug.cc  147
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc  48
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc  125
-rw-r--r--  deps/v8/test/cctest/test-heap.cc  40
-rw-r--r--  deps/v8/test/cctest/test-list.cc  39
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc  124
-rwxr-xr-x  deps/v8/test/cctest/test-parsing.cc  112
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc  17
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc  15
-rw-r--r--  deps/v8/test/cctest/test-utils.cc  61
-rw-r--r--  deps/v8/test/cctest/testcfg.py  8
-rw-r--r--  deps/v8/test/es5conform/testcfg.py  5
-rw-r--r--  deps/v8/test/message/testcfg.py  5
-rw-r--r--  deps/v8/test/mjsunit/array-splice.js  7
-rw-r--r--  deps/v8/test/mjsunit/binary-op-newspace.js  31
-rw-r--r--  deps/v8/test/mjsunit/const-eval-init.js  4
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives.js  6
-rw-r--r--  deps/v8/test/mjsunit/json.js  29
-rw-r--r--  deps/v8/test/mjsunit/regexp.js  18
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-842.js  42
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-851.js  32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-push-args-twice.js  37
-rw-r--r--  deps/v8/test/mjsunit/shifts.js  38
-rw-r--r--  deps/v8/test/mjsunit/str-to-num.js  4
-rw-r--r--  deps/v8/test/mjsunit/testcfg.py  12
-rw-r--r--  deps/v8/test/mjsunit/third_party/array-splice-webkit.js  2
-rw-r--r--  deps/v8/test/mozilla/mozilla.status  5
-rw-r--r--  deps/v8/test/mozilla/testcfg.py  7
-rw-r--r--  deps/v8/test/sputnik/testcfg.py  10
-rwxr-xr-x  deps/v8/tools/gc-nvp-trace-processor.py  12
-rw-r--r--  deps/v8/tools/gyp/v8.gyp  8
-rw-r--r--  deps/v8/tools/oom_dump/README  11
-rw-r--r--  deps/v8/tools/oom_dump/oom_dump.cc  27
-rwxr-xr-x  deps/v8/tools/test.py  73
-rw-r--r--  deps/v8/tools/utils.py  10
-rw-r--r--  deps/v8/tools/v8.xcodeproj/project.pbxproj  20
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj  16
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj  8
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj  8
206 files changed, 24619 insertions, 25550 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 4c96de014b..95a3640604 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,76 @@
+2010-09-08: Version 2.4.2
+
+ Fixed GC crash bug.
+
+ Fixed stack corruption bug.
+
+ Fixed compilation for newer C++ compilers that found Operand(0)
+ ambiguous.
+
+
+2010-09-06: Version 2.4.1
+
+ Added the ability for an embedding application to receive a callback
+ when V8 allocates (V8::AddMemoryAllocationCallback) or deallocates
+ (V8::RemoveMemoryAllocationCallback) from the OS.
+
+ Fixed several JSON bugs (including issue 855).
+
+ Fixed memory overrun crash bug triggered during V8's tick-based
+ profiling.
+
+ Performance improvements on all platforms.
+
+
+2010-09-01: Version 2.4.0
+
+ Fix bug in Object.freeze and Object.seal when Array.prototype or
+ Object.prototype is changed (issue 842).
+
+ Update Array.splice to follow Safari and Firefox when called
+ with zero arguments.
+
+ Fix a missing live register when breaking at keyed loads on ARM.
+
+ Performance improvements on all platforms.
+
+
+2010-08-25: Version 2.3.11
+
+ Fix bug in RegExp related to copy-on-write arrays.
+
+ Refactoring of tools/test.py script, including the introduction of
+ VARIANT_FLAGS that allows specification of sets of flags with which
+ all tests should be run.
+
+ Fix a bug in the handling of debug breaks in CallIC.
+
+ Performance improvements on all platforms.
+
+
+2010-08-23: Version 2.3.10
+
+ Fix bug in bitops on ARM.
+
+ Build fixes for unusual compilers.
+
+ Track high water mark for RWX memory.
+
+ Performance improvements on all platforms.
+
+
+2010-08-18: Version 2.3.9
+
+ Fix compilation for ARMv4 on OpenBSD/FreeBSD.
+
+ Removed specialized handling of GCC 4.4 (issue 830).
+
+ Fixed DST cache to take into account the suspension of DST in
+ Egypt during the 2010 Ramadan (issue http://crbug.com/51855).
+
+ Performance improvements on all platforms.
+
+
2010-08-16: Version 2.3.8
Fixed build with strict aliasing on GCC 4.4 (issue 463).
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 8fc192637c..2a39583f1c 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -54,15 +54,8 @@ if ARM_TARGET_LIB:
else:
ARM_LINK_FLAGS = []
-# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
-# on linux we need these compiler flags to avoid crashes in the v8 test suite
-# and avoid dtoa.c strict aliasing issues
-if os.environ.get('GCC_VERSION') == '44':
- GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
- GCC_DTOA_EXTRA_CCFLAGS = []
-else:
- GCC_EXTRA_CCFLAGS = []
- GCC_DTOA_EXTRA_CCFLAGS = []
+GCC_EXTRA_CCFLAGS = []
+GCC_DTOA_EXTRA_CCFLAGS = []
ANDROID_FLAGS = ['-march=armv7-a',
'-mtune=cortex-a8',
@@ -299,6 +292,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -672,7 +666,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 9e3cb873c6..dd1b8caf7b 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -260,10 +260,17 @@ class V8EXPORT HeapGraphNode {
/**
* Returns node id. For the same heap object, the id remains the same
- * across all snapshots.
+ * across all snapshots. Not applicable to aggregated heap snapshots
+ * as they only contain aggregated instances.
*/
uint64_t GetId() const;
+ /**
+ * Returns the number of instances. Only applicable to aggregated
+ * heap snapshots.
+ */
+ int GetInstancesCount() const;
+
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
@@ -313,6 +320,15 @@ class V8EXPORT HeapSnapshotsDiff {
*/
class V8EXPORT HeapSnapshot {
public:
+ enum Type {
+ kFull = 0, // Heap snapshot with all instances and references.
+ kAggregated = 1 // Snapshot doesn't contain individual heap entries,
+                    // instead they are grouped by constructor name.
+ };
+
+ /** Returns heap snapshot type. */
+ Type GetType() const;
+
/** Returns heap snapshot UID (assigned by the profiler.) */
unsigned GetUid() const;
@@ -322,7 +338,10 @@ class V8EXPORT HeapSnapshot {
/** Returns the root node of the heap graph. */
const HeapGraphNode* GetRoot() const;
- /** Returns a diff between this snapshot and another one. */
+ /**
+ * Returns a diff between this snapshot and another one. Only snapshots
+ * of the same type can be compared.
+ */
const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;
};
@@ -341,8 +360,13 @@ class V8EXPORT HeapProfiler {
/** Returns a profile by uid. */
static const HeapSnapshot* FindSnapshot(unsigned uid);
- /** Takes a heap snapshot and returns it. Title may be an empty string. */
- static const HeapSnapshot* TakeSnapshot(Handle<String> title);
+ /**
+ * Takes a heap snapshot and returns it. Title may be an empty string.
+ * See HeapSnapshot::Type for types description.
+ */
+ static const HeapSnapshot* TakeSnapshot(
+ Handle<String> title,
+ HeapSnapshot::Type type = HeapSnapshot::kFull);
};
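
For context, a minimal embedder sketch of the snapshot-typing API added above. This is an illustration only, not code from the commit: it assumes an initialized V8 built with profiling support, uses the edge-walking accessors (GetChildrenCount/GetChild/GetToNode) declared elsewhere in this header, and the function name is made up.

#include <v8.h>
#include <v8-profiler.h>
#include <cstdio>

// Takes an aggregated snapshot and walks the root's immediate children.
// In a kAggregated snapshot entries are grouped by constructor name, so
// GetInstancesCount() is meaningful while GetId() is not (see the
// corresponding ASSERTs in api.cc further down in this diff).
void DumpAggregatedSnapshot() {
  v8::HandleScope scope;
  const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("example"), v8::HeapSnapshot::kAggregated);
  const v8::HeapGraphNode* root = snapshot->GetRoot();
  for (int i = 0; i < root->GetChildrenCount(); ++i) {
    const v8::HeapGraphNode* node = root->GetChild(i)->GetToNode();
    std::printf("%d instances, %d bytes self size\n",
                node->GetInstancesCount(), node->GetSelfSize());
  }
}

Note that CompareWith() may now only be applied to two snapshots of the same type.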
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index ff73226925..b89c244ca2 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1763,8 +1763,6 @@ class V8EXPORT AccessorInfo {
typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
-typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
-
/**
* NamedProperty[Getter|Setter] are used as interceptors on object.
* See ObjectTemplate::SetNamedPropertyHandler.
@@ -2361,6 +2359,30 @@ typedef void* (*CreateHistogramCallback)(const char* name,
typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
+// --- M e m o r y A l l o c a t i o n C a l l b a c k ---
+ enum ObjectSpace {
+ kObjectSpaceNewSpace = 1 << 0,
+ kObjectSpaceOldPointerSpace = 1 << 1,
+ kObjectSpaceOldDataSpace = 1 << 2,
+ kObjectSpaceCodeSpace = 1 << 3,
+ kObjectSpaceMapSpace = 1 << 4,
+ kObjectSpaceLoSpace = 1 << 5,
+
+ kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
+ kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace |
+ kObjectSpaceLoSpace
+ };
+
+ enum AllocationAction {
+ kAllocationActionAllocate = 1 << 0,
+ kAllocationActionFree = 1 << 1,
+ kAllocationActionAll = kAllocationActionAllocate | kAllocationActionFree
+ };
+
+typedef void (*MemoryAllocationCallback)(ObjectSpace space,
+ AllocationAction action,
+ int size);
+
// --- F a i l e d A c c e s s C h e c k C a l l b a c k ---
typedef void (*FailedAccessCheckCallback)(Local<Object> target,
AccessType type,
@@ -2581,6 +2603,20 @@ class V8EXPORT V8 {
static void SetGlobalGCEpilogueCallback(GCCallback);
/**
+   * Enables the host application to be notified, and to perform custom
+   * logging, when V8 allocates memory from the OS.
+ */
+ static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+
+ /**
+   * Removes a callback that was installed with
+   * AddMemoryAllocationCallback.
+ */
+ static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
+ /**
* Allows the host application to group objects together. If one
* object in the group is alive, all objects in the group are alive.
* After each garbage collection, object groups are removed. It is
diff --git a/deps/v8/node_cygwin_patch.diff b/deps/v8/node_cygwin_patch.diff
deleted file mode 100644
index eeedc6d6ea..0000000000
--- a/deps/v8/node_cygwin_patch.diff
+++ /dev/null
@@ -1,918 +0,0 @@
-diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
-index 7219e9d..b8de1b8 100644
---- a/deps/v8/SConstruct
-+++ b/deps/v8/SConstruct
-@@ -670,7 +670,7 @@ SIMPLE_OPTIONS = {
- 'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
- },
- 'os': {
-- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
-+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
- 'default': OS_GUESS,
- 'help': 'the os to build for (' + OS_GUESS + ')'
- },
-diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
-index 8466a0c..9ff3414 100755
---- a/deps/v8/src/SConscript
-+++ b/deps/v8/src/SConscript
-@@ -206,6 +206,7 @@ SOURCES = {
- 'os:android': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
- 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
-+ 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
- 'os:nullos': ['platform-nullos.cc'],
- 'os:win32': ['platform-win32.cc'],
- 'mode:release': [],
-diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
-new file mode 100644
-index 0000000..34410e8
---- /dev/null
-+++ b/deps/v8/src/platform-cygwin.cc
-@@ -0,0 +1,858 @@
-+// Copyright 2006-2008 the V8 project authors. All rights reserved.
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+// * Redistributions of source code must retain the above copyright
-+// notice, this list of conditions and the following disclaimer.
-+// * Redistributions in binary form must reproduce the above
-+// copyright notice, this list of conditions and the following
-+// disclaimer in the documentation and/or other materials provided
-+// with the distribution.
-+// * Neither the name of Google Inc. nor the names of its
-+// contributors may be used to endorse or promote products derived
-+// from this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+// Platform specific code for Cygwin goes here. For the POSIX compatible parts
-+// the implementation is in platform-posix.cc.
-+
-+#include <pthread.h>
-+#include <semaphore.h>
-+#include <signal.h>
-+#include <sys/time.h>
-+#include <sys/resource.h>
-+#include <sys/types.h>
-+#include <stdlib.h>
-+
-+// Ubuntu Dapper requires memory pages to be marked as
-+// executable. Otherwise, OS raises an exception when executing code
-+// in that page.
-+#include <sys/types.h> // mmap & munmap
-+#include <sys/mman.h> // mmap & munmap
-+#include <sys/stat.h> // open
-+#include <fcntl.h> // open
-+#include <unistd.h> // sysconf
-+#ifdef __GLIBC__
-+#include <execinfo.h> // backtrace, backtrace_symbols
-+#endif // def __GLIBC__
-+#include <strings.h> // index
-+#include <errno.h>
-+#include <stdarg.h>
-+
-+#undef MAP_TYPE
-+
-+#include "v8.h"
-+
-+#include "platform.h"
-+#include "top.h"
-+#include "v8threads.h"
-+
-+
-+namespace v8 {
-+namespace internal {
-+
-+// 0 is never a valid thread id on Linux since tids and pids share a
-+// name space and pid 0 is reserved (see man 2 kill).
-+static const pthread_t kNoThread = (pthread_t) 0;
-+
-+
-+double ceiling(double x) {
-+ return ceil(x);
-+}
-+
-+
-+void OS::Setup() {
-+ // Seed the random number generator.
-+ // Convert the current time to a 64-bit integer first, before converting it
-+ // to an unsigned. Going directly can cause an overflow and the seed to be
-+ // set to all ones. The seed will be identical for different instances that
-+ // call this setup code within the same millisecond.
-+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
-+ srandom(static_cast<unsigned int>(seed));
-+}
-+
-+
-+uint64_t OS::CpuFeaturesImpliedByPlatform() {
-+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
-+ // Here gcc is telling us that we are on an ARM and gcc is assuming that we
-+ // have VFP3 instructions. If gcc can assume it then so can we.
-+ return 1u << VFP3;
-+#elif CAN_USE_ARMV7_INSTRUCTIONS
-+ return 1u << ARMv7;
-+#else
-+ return 0; // Linux runs on anything.
-+#endif
-+}
-+
-+
-+#ifdef __arm__
-+bool OS::ArmCpuHasFeature(CpuFeature feature) {
-+ const char* search_string = NULL;
-+ const char* file_name = "/proc/cpuinfo";
-+ // Simple detection of VFP at runtime for Linux.
-+ // It is based on /proc/cpuinfo, which reveals hardware configuration
-+ // to user-space applications. According to ARM (mid 2009), no similar
-+ // facility is universally available on the ARM architectures,
-+ // so it's up to individual OSes to provide such.
-+ //
-+ // This is written as a straight shot one pass parser
-+ // and not using STL string and ifstream because,
-+ // on Linux, it's reading from a (non-mmap-able)
-+ // character special device.
-+ switch (feature) {
-+ case VFP3:
-+ search_string = "vfp";
-+ break;
-+ case ARMv7:
-+ search_string = "ARMv7";
-+ break;
-+ default:
-+ UNREACHABLE();
-+ }
-+
-+ FILE* f = NULL;
-+ const char* what = search_string;
-+
-+ if (NULL == (f = fopen(file_name, "r")))
-+ return false;
-+
-+ int k;
-+ while (EOF != (k = fgetc(f))) {
-+ if (k == *what) {
-+ ++what;
-+ while ((*what != '\0') && (*what == fgetc(f))) {
-+ ++what;
-+ }
-+ if (*what == '\0') {
-+ fclose(f);
-+ return true;
-+ } else {
-+ what = search_string;
-+ }
-+ }
-+ }
-+ fclose(f);
-+
-+ // Did not find string in the proc file.
-+ return false;
-+}
-+#endif // def __arm__
-+
-+
-+int OS::ActivationFrameAlignment() {
-+#ifdef V8_TARGET_ARCH_ARM
-+ // On EABI ARM targets this is required for fp correctness in the
-+ // runtime system.
-+ return 8;
-+#elif V8_TARGET_ARCH_MIPS
-+ return 8;
-+#endif
-+ // With gcc 4.4 the tree vectorization optimizer can generate code
-+ // that requires 16 byte alignment such as movdqa on x86.
-+ return 16;
-+}
-+
-+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-+ __asm__ __volatile__("" : : : "memory");
-+ // An x86 store acts as a release barrier.
-+ *ptr = value;
-+}
-+
-+const char* OS::LocalTimezone(double time) {
-+ if (isnan(time)) return "";
-+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
-+ struct tm* t = localtime(&tv);
-+ if (NULL == t) return "";
-+ return tzname[0]; // The location of the timezone string on Cygwin.
-+}
-+
-+
-+double OS::LocalTimeOffset() {
-+ //
-+ // On Cygwin, struct tm does not contain a tm_gmtoff field.
-+ time_t utc = time(NULL);
-+ ASSERT(utc != -1);
-+ struct tm* loc = localtime(&utc);
-+ ASSERT(loc != NULL);
-+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
-+}
-+
-+
-+// We keep the lowest and highest addresses mapped as a quick way of
-+// determining that pointers are outside the heap (used mostly in assertions
-+// and verification). The estimate is conservative, i.e., not all addresses in
-+// 'allocated' space are actually allocated to our heap. The range is
-+// [lowest, highest), inclusive on the low end and exclusive on the high end.
-+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-+
-+
-+static void UpdateAllocatedSpaceLimits(void* address, int size) {
-+ lowest_ever_allocated = Min(lowest_ever_allocated, address);
-+ highest_ever_allocated =
-+ Max(highest_ever_allocated,
-+ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-+}
-+
-+
-+bool OS::IsOutsideAllocatedSpace(void* address) {
-+ return address < lowest_ever_allocated || address >= highest_ever_allocated;
-+}
-+
-+
-+size_t OS::AllocateAlignment() {
-+ return sysconf(_SC_PAGESIZE);
-+}
-+
-+
-+void* OS::Allocate(const size_t requested,
-+ size_t* allocated,
-+ bool is_executable) {
-+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
-+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-+ if (mbase == MAP_FAILED) {
-+ LOG(StringEvent("OS::Allocate", "mmap failed"));
-+ return NULL;
-+ }
-+ *allocated = msize;
-+ UpdateAllocatedSpaceLimits(mbase, msize);
-+ return mbase;
-+}
-+
-+
-+void OS::Free(void* address, const size_t size) {
-+ // TODO(1240712): munmap has a return value which is ignored here.
-+ int result = munmap(address, size);
-+ USE(result);
-+ ASSERT(result == 0);
-+}
-+
-+
-+#ifdef ENABLE_HEAP_PROTECTION
-+
-+void OS::Protect(void* address, size_t size) {
-+ // TODO(1240712): mprotect has a return value which is ignored here.
-+ mprotect(address, size, PROT_READ);
-+}
-+
-+
-+void OS::Unprotect(void* address, size_t size, bool is_executable) {
-+ // TODO(1240712): mprotect has a return value which is ignored here.
-+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-+ mprotect(address, size, prot);
-+}
-+
-+#endif
-+
-+
-+void OS::Sleep(int milliseconds) {
-+ unsigned int ms = static_cast<unsigned int>(milliseconds);
-+ usleep(1000 * ms);
-+}
-+
-+
-+void OS::Abort() {
-+ // Redirect to std abort to signal abnormal program termination.
-+ abort();
-+}
-+
-+
-+void OS::DebugBreak() {
-+// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-+// which is the architecture of generated code).
-+#if (defined(__arm__) || defined(__thumb__)) && \
-+ defined(CAN_USE_ARMV5_INSTRUCTIONS)
-+ asm("bkpt 0");
-+#elif defined(__mips__)
-+ asm("break");
-+#else
-+ asm("int $3");
-+#endif
-+}
-+
-+
-+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
-+ public:
-+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
-+ : file_(file), memory_(memory), size_(size) { }
-+ virtual ~PosixMemoryMappedFile();
-+ virtual void* memory() { return memory_; }
-+ private:
-+ FILE* file_;
-+ void* memory_;
-+ int size_;
-+};
-+
-+
-+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-+ void* initial) {
-+ FILE* file = fopen(name, "w+");
-+ if (file == NULL) return NULL;
-+ int result = fwrite(initial, size, 1, file);
-+ if (result < 1) {
-+ fclose(file);
-+ return NULL;
-+ }
-+ void* memory =
-+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-+ return new PosixMemoryMappedFile(file, memory, size);
-+}
-+
-+
-+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-+ if (memory_) munmap(memory_, size_);
-+ fclose(file_);
-+}
-+
-+
-+void OS::LogSharedLibraryAddresses() {
-+#ifdef ENABLE_LOGGING_AND_PROFILING
-+ // This function assumes that the layout of the file is as follows:
-+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
-+ // If we encounter an unexpected situation we abort scanning further entries.
-+ FILE* fp = fopen("/proc/self/maps", "r");
-+ if (fp == NULL) return;
-+
-+ // Allocate enough room to be able to store a full file name.
-+ const int kLibNameLen = FILENAME_MAX + 1;
-+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-+
-+ // This loop will terminate once the scanning hits an EOF.
-+ while (true) {
-+ uintptr_t start, end;
-+ char attr_r, attr_w, attr_x, attr_p;
-+ // Parse the addresses and permission bits at the beginning of the line.
-+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
-+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-+
-+ int c;
-+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
-+ // Found a read-only executable entry. Skip characters until we reach
-+ // the beginning of the filename or the end of the line.
-+ do {
-+ c = getc(fp);
-+ } while ((c != EOF) && (c != '\n') && (c != '/'));
-+ if (c == EOF) break; // EOF: Was unexpected, just exit.
-+
-+ // Process the filename if found.
-+ if (c == '/') {
-+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
-+
-+ // Read to the end of the line. Exit if the read fails.
-+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-+
-+ // Drop the newline character read by fgets. We do not need to check
-+ // for a zero-length string because we know that we at least read the
-+ // '/' character.
-+ lib_name[strlen(lib_name) - 1] = '\0';
-+ } else {
-+ // No library name found, just record the raw address range.
-+ snprintf(lib_name, kLibNameLen,
-+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
-+ }
-+ LOG(SharedLibraryEvent(lib_name, start, end));
-+ } else {
-+ // Entry not describing executable data. Skip to end of line to set up
-+ // reading the next entry.
-+ do {
-+ c = getc(fp);
-+ } while ((c != EOF) && (c != '\n'));
-+ if (c == EOF) break;
-+ }
-+ }
-+ free(lib_name);
-+ fclose(fp);
-+#endif
-+}
-+
-+
-+int OS::StackWalk(Vector<OS::StackFrame> frames) {
-+ // backtrace is a glibc extension.
-+#ifdef __GLIBC__
-+ int frames_size = frames.length();
-+ ScopedVector<void*> addresses(frames_size);
-+
-+ int frames_count = backtrace(addresses.start(), frames_size);
-+
-+ char** symbols = backtrace_symbols(addresses.start(), frames_count);
-+ if (symbols == NULL) {
-+ return kStackWalkError;
-+ }
-+
-+ for (int i = 0; i < frames_count; i++) {
-+ frames[i].address = addresses[i];
-+ // Format a text representation of the frame based on the information
-+ // available.
-+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
-+ "%s",
-+ symbols[i]);
-+ // Make sure line termination is in place.
-+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-+ }
-+
-+ free(symbols);
-+
-+ return frames_count;
-+#else // ndef __GLIBC__
-+ return 0;
-+#endif // ndef __GLIBC__
-+}
-+
-+
-+// Constants used for mmap.
-+static const int kMmapFd = -1;
-+static const int kMmapFdOffset = 0;
-+
-+
-+VirtualMemory::VirtualMemory(size_t size) {
-+ address_ = mmap(NULL, size, PROT_NONE,
-+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-+ kMmapFd, kMmapFdOffset);
-+ size_ = size;
-+}
-+
-+
-+VirtualMemory::~VirtualMemory() {
-+ if (IsReserved()) {
-+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
-+ }
-+}
-+
-+
-+bool VirtualMemory::IsReserved() {
-+ return address_ != MAP_FAILED;
-+}
-+
-+
-+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-+
-+#ifdef HAS_MAP_FIXED
-+ if (MAP_FAILED == mmap(address, size, prot,
-+ MAP_PRIVATE | MAP_ANONYMOUS, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
-+ kMmapFd, kMmapFdOffset)) {
-+ return false;
-+ }
-+#else
-+ if (mprotect(address, size, prot) != 0) {
-+ return false;
-+ }
-+#endif
-+
-+ UpdateAllocatedSpaceLimits(address, size);
-+ return true;
-+}
-+
-+
-+bool VirtualMemory::Uncommit(void* address, size_t size) {
-+ return mmap(address, size, PROT_NONE,
-+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
-+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
-+}
-+
-+
-+class ThreadHandle::PlatformData : public Malloced {
-+ public:
-+ explicit PlatformData(ThreadHandle::Kind kind) {
-+ Initialize(kind);
-+ }
-+
-+ void Initialize(ThreadHandle::Kind kind) {
-+ switch (kind) {
-+ case ThreadHandle::SELF: thread_ = pthread_self(); break;
-+ case ThreadHandle::INVALID: thread_ = kNoThread; break;
-+ }
-+ }
-+
-+ pthread_t thread_; // Thread handle for pthread.
-+};
-+
-+
-+ThreadHandle::ThreadHandle(Kind kind) {
-+ data_ = new PlatformData(kind);
-+}
-+
-+
-+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-+ data_->Initialize(kind);
-+}
-+
-+
-+ThreadHandle::~ThreadHandle() {
-+ delete data_;
-+}
-+
-+
-+bool ThreadHandle::IsSelf() const {
-+ return pthread_equal(data_->thread_, pthread_self());
-+}
-+
-+
-+bool ThreadHandle::IsValid() const {
-+ return data_->thread_ != kNoThread;
-+}
-+
-+
-+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
-+}
-+
-+
-+Thread::~Thread() {
-+}
-+
-+
-+static void* ThreadEntry(void* arg) {
-+ Thread* thread = reinterpret_cast<Thread*>(arg);
-+ // This is also initialized by the first argument to pthread_create() but we
-+ // don't know which thread will run first (the original thread or the new
-+ // one) so we initialize it here too.
-+ thread->thread_handle_data()->thread_ = pthread_self();
-+ ASSERT(thread->IsValid());
-+ thread->Run();
-+ return NULL;
-+}
-+
-+
-+void Thread::Start() {
-+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
-+ ASSERT(IsValid());
-+}
-+
-+
-+void Thread::Join() {
-+ pthread_join(thread_handle_data()->thread_, NULL);
-+}
-+
-+
-+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-+ pthread_key_t key;
-+ int result = pthread_key_create(&key, NULL);
-+ USE(result);
-+ ASSERT(result == 0);
-+ return static_cast<LocalStorageKey>(key);
-+}
-+
-+
-+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-+ int result = pthread_key_delete(pthread_key);
-+ USE(result);
-+ ASSERT(result == 0);
-+}
-+
-+
-+void* Thread::GetThreadLocal(LocalStorageKey key) {
-+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-+ return pthread_getspecific(pthread_key);
-+}
-+
-+
-+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-+ pthread_setspecific(pthread_key, value);
-+}
-+
-+
-+void Thread::YieldCPU() {
-+ sched_yield();
-+}
-+
-+
-+class CygwinMutex : public Mutex {
-+ public:
-+
-+ CygwinMutex() {
-+ pthread_mutexattr_t attrs;
-+ memset(&attrs, 0, sizeof(attrs));
-+
-+ int result = pthread_mutexattr_init(&attrs);
-+ ASSERT(result == 0);
-+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-+ ASSERT(result == 0);
-+ result = pthread_mutex_init(&mutex_, &attrs);
-+ ASSERT(result == 0);
-+ }
-+
-+ virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-+
-+ virtual int Lock() {
-+ int result = pthread_mutex_lock(&mutex_);
-+ return result;
-+ }
-+
-+ virtual int Unlock() {
-+ int result = pthread_mutex_unlock(&mutex_);
-+ return result;
-+ }
-+
-+ private:
-+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-+};
-+
-+
-+Mutex* OS::CreateMutex() {
-+ return new CygwinMutex();
-+}
-+
-+
-+class CygwinSemaphore : public Semaphore {
-+ public:
-+ explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
-+ virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-+
-+ virtual void Wait();
-+ virtual bool Wait(int timeout);
-+ virtual void Signal() { sem_post(&sem_); }
-+ private:
-+ sem_t sem_;
-+};
-+
-+
-+void CygwinSemaphore::Wait() {
-+ while (true) {
-+ int result = sem_wait(&sem_);
-+ if (result == 0) return; // Successfully got semaphore.
-+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
-+ }
-+}
-+
-+
-+#ifndef TIMEVAL_TO_TIMESPEC
-+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
-+ (ts)->tv_sec = (tv)->tv_sec; \
-+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-+} while (false)
-+#endif
-+
-+
-+bool CygwinSemaphore::Wait(int timeout) {
-+ const long kOneSecondMicros = 1000000; // NOLINT
-+
-+ // Split timeout into second and nanosecond parts.
-+ struct timeval delta;
-+ delta.tv_usec = timeout % kOneSecondMicros;
-+ delta.tv_sec = timeout / kOneSecondMicros;
-+
-+ struct timeval current_time;
-+ // Get the current time.
-+ if (gettimeofday(&current_time, NULL) == -1) {
-+ return false;
-+ }
-+
-+ // Calculate time for end of timeout.
-+ struct timeval end_time;
-+ timeradd(&current_time, &delta, &end_time);
-+
-+ struct timespec ts;
-+ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-+ // Wait for semaphore signalled or timeout.
-+ while (true) {
-+ int result = sem_timedwait(&sem_, &ts);
-+ if (result == 0) return true; // Successfully got semaphore.
-+ if (result > 0) {
-+ // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
-+ errno = result;
-+ result = -1;
-+ }
-+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
-+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
-+ }
-+}
-+
-+
-+Semaphore* OS::CreateSemaphore(int count) {
-+ return new CygwinSemaphore(count);
-+}
-+
-+
-+#ifdef ENABLE_LOGGING_AND_PROFILING
-+
-+static Sampler* active_sampler_ = NULL;
-+static pthread_t vm_thread_ = 0;
-+
-+
-+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
-+// Android runs a fairly new Linux kernel, so signal info is there,
-+// but the C library doesn't have the structs defined.
-+
-+struct sigcontext {
-+ uint32_t trap_no;
-+ uint32_t error_code;
-+ uint32_t oldmask;
-+ uint32_t gregs[16];
-+ uint32_t arm_cpsr;
-+ uint32_t fault_address;
-+};
-+typedef uint32_t __sigset_t;
-+typedef struct sigcontext mcontext_t;
-+typedef struct ucontext {
-+ uint32_t uc_flags;
-+ struct ucontext* uc_link;
-+ stack_t uc_stack;
-+ mcontext_t uc_mcontext;
-+ __sigset_t uc_sigmask;
-+} ucontext_t;
-+enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-+
-+#endif
-+
-+
-+// A function that determines if a signal handler is called in the context
-+// of a VM thread.
-+//
-+// The problem is that SIGPROF signal can be delivered to an arbitrary thread
-+// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
-+// So, if the signal is being handled in the context of a non-VM thread,
-+// it means that the VM thread is running, and trying to sample its stack can
-+// cause a crash.
-+static inline bool IsVmThread() {
-+ // In the case of a single VM thread, this check is enough.
-+ if (pthread_equal(pthread_self(), vm_thread_)) return true;
-+ // If there are multiple threads that use VM, they must have a thread id
-+ // stored in TLS. To verify that the thread is really executing VM,
-+ // we check Top's data. Given that ThreadManager::RestoreThread first
-+ // restores ThreadLocalTop from TLS, and only then erases the TLS value,
-+ // reading Top::thread_id() should not be affected by races.
-+ if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
-+ ThreadManager::CurrentId() == Top::thread_id()) {
-+ return true;
-+ }
-+ return false;
-+}
-+
-+
-+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-+#ifndef V8_HOST_ARCH_MIPS
-+ USE(info);
-+ if (signal != SIGPROF) return;
-+ if (active_sampler_ == NULL) return;
-+
-+ TickSample sample_obj;
-+ TickSample* sample = CpuProfiler::TickSampleEvent();
-+ if (sample == NULL) sample = &sample_obj;
-+
-+ // We always sample the VM state.
-+ sample->state = VMState::current_state();
-+
-+#if 0
-+ // If profiling, we extract the current pc and sp.
-+ if (active_sampler_->IsProfiling()) {
-+ // Extracting the sample from the context is extremely machine dependent.
-+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-+ mcontext_t& mcontext = ucontext->uc_mcontext;
-+#if V8_HOST_ARCH_IA32
-+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
-+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
-+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-+#elif V8_HOST_ARCH_X64
-+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
-+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
-+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-+#elif V8_HOST_ARCH_ARM
-+// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
-+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
-+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
-+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-+#else
-+ sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
-+ sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
-+ sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-+#endif
-+#elif V8_HOST_ARCH_MIPS
-+ // Implement this on MIPS.
-+ UNIMPLEMENTED();
-+#endif
-+ if (IsVmThread()) {
-+ active_sampler_->SampleStack(sample);
-+ }
-+ }
-+#endif
-+
-+ active_sampler_->Tick(sample);
-+#endif
-+}
-+
-+
-+class Sampler::PlatformData : public Malloced {
-+ public:
-+ PlatformData() {
-+ signal_handler_installed_ = false;
-+ }
-+
-+ bool signal_handler_installed_;
-+ struct sigaction old_signal_handler_;
-+ struct itimerval old_timer_value_;
-+};
-+
-+
-+Sampler::Sampler(int interval, bool profiling)
-+ : interval_(interval), profiling_(profiling), active_(false) {
-+ data_ = new PlatformData();
-+}
-+
-+
-+Sampler::~Sampler() {
-+ delete data_;
-+}
-+
-+
-+void Sampler::Start() {
-+ // There can only be one active sampler at a time on POSIX
-+ // platforms.
-+ if (active_sampler_ != NULL) return;
-+
-+ vm_thread_ = pthread_self();
-+
-+ // Request profiling signals.
-+ struct sigaction sa;
-+ sa.sa_sigaction = ProfilerSignalHandler;
-+ sigemptyset(&sa.sa_mask);
-+ sa.sa_flags = SA_SIGINFO;
-+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
-+ data_->signal_handler_installed_ = true;
-+
-+ // Set the itimer to generate a tick for each interval.
-+ itimerval itimer;
-+ itimer.it_interval.tv_sec = interval_ / 1000;
-+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
-+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
-+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
-+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-+
-+ // Set this sampler as the active sampler.
-+ active_sampler_ = this;
-+ active_ = true;
-+}
-+
-+
-+void Sampler::Stop() {
-+ // Restore old signal handler
-+ if (data_->signal_handler_installed_) {
-+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
-+ sigaction(SIGPROF, &data_->old_signal_handler_, 0);
-+ data_->signal_handler_installed_ = false;
-+ }
-+
-+ // This sampler is no longer the active sampler.
-+ active_sampler_ = NULL;
-+ active_ = false;
-+}
-+
-+
-+#endif // ENABLE_LOGGING_AND_PROFILING
-+
-+} } // namespace v8::internal
-diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
-index d63ca5e..1091ba6 100644
---- a/deps/v8/src/platform.h
-+++ b/deps/v8/src/platform.h
-@@ -360,7 +360,11 @@ class ThreadHandle {
- class Thread: public ThreadHandle {
- public:
- // Opaque data type for thread-local storage keys.
-+#ifndef __CYGWIN__
- enum LocalStorageKey {};
-+#else
-+ typedef void *LocalStorageKey;
-+#endif
-
- // Create new thread.
- Thread();
-diff --git a/deps/v8/tools/utils.py b/deps/v8/tools/utils.py
-index 3a55722..505c398 100644
---- a/deps/v8/tools/utils.py
-+++ b/deps/v8/tools/utils.py
-@@ -59,6 +59,8 @@ def GuessOS():
- return 'openbsd'
- elif id == 'SunOS':
- return 'solaris'
-+ elif id.find('CYGWIN') >= 0:
-+ return 'cygwin'
- else:
- return None
-
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 29b8e1f377..7fae8d4b6f 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -62,7 +62,6 @@ SOURCES = {
execution.cc
factory.cc
flags.cc
- flow-graph.cc
frame-element.cc
frames.cc
full-codegen.cc
@@ -121,6 +120,7 @@ SOURCES = {
jump-target-light.cc
virtual-frame-light.cc
arm/builtins-arm.cc
+ arm/code-stubs-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
arm/cpu-arm.cc
@@ -159,6 +159,7 @@ SOURCES = {
virtual-frame-heavy.cc
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
+ ia32/code-stubs-ia32.cc
ia32/codegen-ia32.cc
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
@@ -178,6 +179,7 @@ SOURCES = {
virtual-frame-heavy.cc
x64/assembler-x64.cc
x64/builtins-x64.cc
+ x64/code-stubs-x64.cc
x64/codegen-x64.cc
x64/cpu-x64.cc
x64/debug-x64.cc
@@ -200,7 +202,6 @@ SOURCES = {
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
- 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
diff --git a/deps/v8/src/SConscript.orig b/deps/v8/src/SConscript.orig
deleted file mode 100755
index e6b4e3820c..0000000000
--- a/deps/v8/src/SConscript.orig
+++ /dev/null
@@ -1,324 +0,0 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import sys
-from os.path import join, dirname, abspath
-root_dir = dirname(File('SConstruct').rfile().abspath)
-sys.path.append(join(root_dir, 'tools'))
-import js2c
-Import('context')
-
-
-SOURCES = {
- 'all': Split("""
- accessors.cc
- allocation.cc
- api.cc
- assembler.cc
- ast.cc
- bootstrapper.cc
- builtins.cc
- checks.cc
- circular-queue.cc
- code-stubs.cc
- codegen.cc
- compilation-cache.cc
- compiler.cc
- contexts.cc
- conversions.cc
- counters.cc
- cpu-profiler.cc
- data-flow.cc
- dateparser.cc
- debug-agent.cc
- debug.cc
- disassembler.cc
- diy-fp.cc
- dtoa.cc
- execution.cc
- factory.cc
- flags.cc
- flow-graph.cc
- frame-element.cc
- frames.cc
- full-codegen.cc
- func-name-inferrer.cc
- global-handles.cc
- fast-dtoa.cc
- fixed-dtoa.cc
- handles.cc
- hashmap.cc
- heap-profiler.cc
- heap.cc
- ic.cc
- interpreter-irregexp.cc
- jsregexp.cc
- jump-target.cc
- liveedit.cc
- log-utils.cc
- log.cc
- mark-compact.cc
- messages.cc
- objects.cc
- objects-visiting.cc
- oprofile-agent.cc
- parser.cc
- profile-generator.cc
- property.cc
- regexp-macro-assembler-irregexp.cc
- regexp-macro-assembler.cc
- regexp-stack.cc
- register-allocator.cc
- rewriter.cc
- runtime.cc
- scanner.cc
- scopeinfo.cc
- scopes.cc
- serialize.cc
- snapshot-common.cc
- spaces.cc
- string-stream.cc
- stub-cache.cc
- token.cc
- top.cc
- type-info.cc
- unicode.cc
- utils.cc
- v8-counters.cc
- v8.cc
- v8threads.cc
- variables.cc
- version.cc
- virtual-frame.cc
- vm-state.cc
- zone.cc
- """),
- 'arch:arm': Split("""
- jump-target-light.cc
- virtual-frame-light.cc
- arm/builtins-arm.cc
- arm/codegen-arm.cc
- arm/constants-arm.cc
- arm/cpu-arm.cc
- arm/debug-arm.cc
- arm/disasm-arm.cc
- arm/frames-arm.cc
- arm/full-codegen-arm.cc
- arm/ic-arm.cc
- arm/jump-target-arm.cc
- arm/macro-assembler-arm.cc
- arm/regexp-macro-assembler-arm.cc
- arm/register-allocator-arm.cc
- arm/stub-cache-arm.cc
- arm/virtual-frame-arm.cc
- arm/assembler-arm.cc
- """),
- 'arch:mips': Split("""
- mips/assembler-mips.cc
- mips/builtins-mips.cc
- mips/codegen-mips.cc
- mips/constants-mips.cc
- mips/cpu-mips.cc
- mips/debug-mips.cc
- mips/disasm-mips.cc
- mips/full-codegen-mips.cc
- mips/frames-mips.cc
- mips/ic-mips.cc
- mips/jump-target-mips.cc
- mips/macro-assembler-mips.cc
- mips/register-allocator-mips.cc
- mips/stub-cache-mips.cc
- mips/virtual-frame-mips.cc
- """),
- 'arch:ia32': Split("""
- jump-target-heavy.cc
- virtual-frame-heavy.cc
- ia32/assembler-ia32.cc
- ia32/builtins-ia32.cc
- ia32/codegen-ia32.cc
- ia32/cpu-ia32.cc
- ia32/debug-ia32.cc
- ia32/disasm-ia32.cc
- ia32/frames-ia32.cc
- ia32/full-codegen-ia32.cc
- ia32/ic-ia32.cc
- ia32/jump-target-ia32.cc
- ia32/macro-assembler-ia32.cc
- ia32/regexp-macro-assembler-ia32.cc
- ia32/register-allocator-ia32.cc
- ia32/stub-cache-ia32.cc
- ia32/virtual-frame-ia32.cc
- """),
- 'arch:x64': Split("""
- jump-target-heavy.cc
- virtual-frame-heavy.cc
- x64/assembler-x64.cc
- x64/builtins-x64.cc
- x64/codegen-x64.cc
- x64/cpu-x64.cc
- x64/debug-x64.cc
- x64/disasm-x64.cc
- x64/frames-x64.cc
- x64/full-codegen-x64.cc
- x64/ic-x64.cc
- x64/jump-target-x64.cc
- x64/macro-assembler-x64.cc
- x64/regexp-macro-assembler-x64.cc
- x64/register-allocator-x64.cc
- x64/stub-cache-x64.cc
- x64/virtual-frame-x64.cc
- """),
- 'simulator:arm': ['arm/simulator-arm.cc'],
- 'simulator:mips': ['mips/simulator-mips.cc'],
- 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
- 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
- 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:android': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
- 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
- 'os:nullos': ['platform-nullos.cc'],
- 'os:win32': ['platform-win32.cc'],
- 'mode:release': [],
- 'mode:debug': [
- 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
- ]
-}
-
-
-D8_FILES = {
- 'all': [
- 'd8.cc', 'd8-debug.cc'
- ],
- 'os:linux': [
- 'd8-posix.cc'
- ],
- 'os:macos': [
- 'd8-posix.cc'
- ],
- 'os:android': [
- 'd8-posix.cc'
- ],
- 'os:freebsd': [
- 'd8-posix.cc'
- ],
- 'os:openbsd': [
- 'd8-posix.cc'
- ],
- 'os:solaris': [
- 'd8-posix.cc'
- ],
- 'os:win32': [
- 'd8-windows.cc'
- ],
- 'os:nullos': [
- 'd8-windows.cc' # Empty implementation at the moment.
- ],
- 'console:readline': [
- 'd8-readline.cc'
- ]
-}
-
-
-LIBRARY_FILES = '''
-runtime.js
-v8natives.js
-array.js
-string.js
-uri.js
-math.js
-messages.js
-apinatives.js
-date.js
-regexp.js
-json.js
-liveedit-debugger.js
-mirror-debugger.js
-debug-debugger.js
-'''.split()
-
-
-def Abort(message):
- print message
- sys.exit(1)
-
-
-def ConfigureObjectFiles():
- env = Environment()
- env.Replace(**context.flags['v8'])
- context.ApplyEnvOverrides(env)
- env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
- env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
-
- # Build the standard platform-independent source files.
- source_files = context.GetRelevantSources(SOURCES)
-
- d8_files = context.GetRelevantSources(D8_FILES)
- d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
- d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
- d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
-
- # Combine the JavaScript library files into a single C++ file and
- # compile it.
- library_files = [s for s in LIBRARY_FILES]
- library_files.append('macros.py')
- libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
- libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
-
- # Build dtoa.
- dtoa_env = env.Copy()
- dtoa_env.Replace(**context.flags['dtoa'])
- dtoa_files = ['dtoa-config.c']
- dtoa_obj = context.ConfigureObject(dtoa_env, dtoa_files)
-
- source_objs = context.ConfigureObject(env, source_files)
- non_snapshot_files = [dtoa_obj, source_objs]
-
- # Create snapshot if necessary. For cross compilation you should either
- # do without snapshots and take the performance hit or you should build a
- # host VM with the simulator=arm and snapshot=on options and then take the
- # resulting snapshot.cc file from obj/release and put it in the src
- # directory. Then rebuild the VM with the cross compiler and specify
- # snapshot=nobuild on the scons command line.
- empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
- mksnapshot_env = env.Copy()
- mksnapshot_env.Replace(**context.flags['mksnapshot'])
- mksnapshot_src = 'mksnapshot.cc'
- mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
- if context.use_snapshot:
- if context.build_snapshot:
- snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
- else:
- snapshot_cc = 'snapshot.cc'
- snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
- else:
- snapshot_obj = empty_snapshot_obj
- library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
- return (library_objs, d8_objs, [mksnapshot])
-
-
-(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
-Return('library_objs d8_objs mksnapshot')
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 7a840a191e..eeab2acfe4 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -75,8 +75,10 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- static Object* FunctionGetPrototype(Object* object, void*);
- static Object* FunctionSetPrototype(JSObject* object, Object* value, void*);
+ MUST_USE_RESULT static Object* FunctionGetPrototype(Object* object, void*);
+ MUST_USE_RESULT static Object* FunctionSetPrototype(JSObject* object,
+ Object* value,
+ void*);
private:
// Accessor functions only used through the descriptor.
static Object* FunctionGetLength(Object* object, void*);
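
For reference, MUST_USE_RESULT is V8's warn-unused-result annotation from src/globals.h (which this commit also touches, per the diffstat). A sketch of the usual definition; the exact compiler guard in globals.h may differ:

#if defined(__GNUC__)
// GCC warns when a caller ignores the returned Object*, which on failure
// may be a retry-after-GC failure object rather than a real result.
#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif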
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 7a967dbffd..0d01fcc75e 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -1136,13 +1136,18 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
ScriptData* ScriptData::New(const char* data, int length) {
// Return an empty ScriptData if the length is obviously invalid.
if (length % sizeof(unsigned) != 0) {
- return new i::ScriptDataImpl(i::Vector<unsigned>());
+ return new i::ScriptDataImpl();
}
// Copy the data to ensure it is properly aligned.
int deserialized_data_length = length / sizeof(unsigned);
+ // If aligned, don't create a copy of the data.
+ if (reinterpret_cast<intptr_t>(data) % sizeof(unsigned) == 0) {
+ return new i::ScriptDataImpl(data, length);
+ }
+ // Copy the data to align it.
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- memcpy(deserialized_data, data, length);
+ i::MemCopy(deserialized_data, data, length);
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -3905,6 +3910,22 @@ void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
}
+void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action) {
+ if (IsDeadCheck("v8::V8::AddMemoryAllocationCallback()")) return;
+ i::MemoryAllocator::AddMemoryAllocationCallback(callback,
+ space,
+ action);
+}
+
+
+void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
+ if (IsDeadCheck("v8::V8::RemoveMemoryAllocationCallback()")) return;
+ i::MemoryAllocator::RemoveMemoryAllocationCallback(callback);
+}
+
+
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
PauseProfilerEx(PROFILER_MODULE_CPU);
@@ -4592,10 +4613,18 @@ Handle<String> HeapGraphNode::GetName() const {
uint64_t HeapGraphNode::GetId() const {
IsDeadCheck("v8::HeapGraphNode::GetId");
+ ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
return ToInternal(this)->id();
}
+int HeapGraphNode::GetInstancesCount() const {
+ IsDeadCheck("v8::HeapGraphNode::GetInstancesCount");
+ ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
+ return static_cast<int>(ToInternal(this)->id());
+}
+
+
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
@@ -4677,6 +4706,12 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
}
+HeapSnapshot::Type HeapSnapshot::GetType() const {
+ IsDeadCheck("v8::HeapSnapshot::GetType");
+ return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
+}
+
+
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
@@ -4724,10 +4759,22 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
}
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title) {
+const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
+ HeapSnapshot::Type type) {
IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
+ i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
+ switch (type) {
+ case HeapSnapshot::kFull:
+ internal_type = i::HeapSnapshot::kFull;
+ break;
+ case HeapSnapshot::kAggregated:
+ internal_type = i::HeapSnapshot::kAggregated;
+ break;
+ default:
+ UNREACHABLE();
+ }
return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title)));
+ i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type));
}
#endif // ENABLE_LOGGING_AND_PROFILING
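
For context, a hedged usage sketch of the extended profiler entry point (assumes an ENABLE_LOGGING_AND_PROFILING build with a live HandleScope and context; names mirror the public API above):

    // Full snapshot: nodes carry ids (see the GetId() ASSERT above).
    const v8::HeapSnapshot* full =
        v8::HeapProfiler::TakeSnapshot(v8::String::New("full"),
                                       v8::HeapSnapshot::kFull);
    // Aggregated snapshot: nodes report instance counts instead.
    const v8::HeapSnapshot* agg =
        v8::HeapProfiler::TakeSnapshot(v8::String::New("agg"),
                                       v8::HeapSnapshot::kAggregated);
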
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 136c82e7ef..7d368bf415 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -1809,6 +1809,7 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+
void Assembler::vldr(const DwVfpRegister dst,
const Register base,
int offset,
@@ -1820,6 +1821,7 @@ void Assembler::vldr(const DwVfpRegister dst,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
@@ -1836,7 +1838,10 @@ void Assembler::vldr(const SwVfpRegister dst,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
- emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ ASSERT(offset >= 0);
+ int sd, d;
+ dst.split_code(&sd, &d);
+ emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
}
@@ -1852,11 +1857,31 @@ void Assembler::vstr(const DwVfpRegister src,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
+void Assembler::vstr(const SwVfpRegister src,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // MEM(Rbase + offset) = SSrc.
+ // Instruction details available in ARM DDI 0406A, A8-786.
+ // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+ // Vdst(15-12) | 1010(11-8) | (offset/4)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
+ int sd, d;
+ src.split_code(&sd, &d);
+ emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | ((offset / 4) & 255));
+}
+
+
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -1959,8 +1984,10 @@ void Assembler::vmov(const SwVfpRegister dst,
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B6 | src.code());
+ int sd, d, sm, m;
+ dst.split_code(&sd, &d);
+ src.split_code(&sm, &m);
+ emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
@@ -2014,8 +2041,9 @@ void Assembler::vmov(const SwVfpRegister dst,
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src.is(pc));
- emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
- src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+ int sn, n;
+ dst.split_code(&sn, &n);
+ emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
@@ -2028,8 +2056,9 @@ void Assembler::vmov(const Register dst,
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst.is(pc));
- emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
- dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+ int sn, n;
+ src.split_code(&sn, &n);
+ emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
@@ -2079,16 +2108,21 @@ static bool IsDoubleVFPType(VFPType type) {
}
-// Depending on split_last_bit split binary representation of reg_code into Vm:M
-// or M:Vm form (where M is single bit).
-static void SplitRegCode(bool split_last_bit,
+// Split five bit reg_code based on size of reg_type.
+// 32-bit register codes are Vm:M
+// 64-bit register codes are M:Vm
+// where Vm is four bits, and M is a single bit.
+static void SplitRegCode(VFPType reg_type,
int reg_code,
int* vm,
int* m) {
- if (split_last_bit) {
+ ASSERT((reg_code >= 0) && (reg_code <= 31));
+ if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
+ // 32 bit type.
*m = reg_code & 0x1;
*vm = reg_code >> 1;
} else {
+ // 64 bit type.
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
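
The rewritten SplitRegCode boils down to two fixed bit splits; a standalone sketch (illustrative, not part of the patch):

    // 32-bit (single-precision) codes split as Vm:M, 64-bit (double) as M:Vm.
    static void SplitSingle(int code, int* vm, int* m) {
      *m = code & 0x1;          // low bit becomes M
      *vm = code >> 1;          // upper four bits become Vm
    }
    static void SplitDouble(int code, int* vm, int* m) {
      *m = (code & 0x10) >> 4;  // bit 4 becomes M
      *vm = code & 0x0F;        // low four bits become Vm
    }
    // Example: code 15 gives Vm=7, M=1 as a single and Vm=15, M=0 as a double.
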
@@ -2101,6 +2135,11 @@ static Instr EncodeVCVT(const VFPType dst_type,
const VFPType src_type,
const int src_code,
const Condition cond) {
+ ASSERT(src_type != dst_type);
+ int D, Vd, M, Vm;
+ SplitRegCode(src_type, src_code, &Vm, &M);
+ SplitRegCode(dst_type, dst_code, &Vd, &D);
+
if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
// Conversion between IEEE floating point and 32-bit integer.
// Instruction details available in ARM DDI 0406B, A8.6.295.
@@ -2108,22 +2147,17 @@ static Instr EncodeVCVT(const VFPType dst_type,
// Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
- int sz, opc2, D, Vd, M, Vm, op;
+ int sz, opc2, op;
if (IsIntegerVFPType(dst_type)) {
opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
op = 1; // round towards zero
- SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
- SplitRegCode(true, dst_code, &Vd, &D);
} else {
ASSERT(IsIntegerVFPType(src_type));
-
opc2 = 0x0;
sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
- SplitRegCode(true, src_code, &Vm, &M);
- SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
}
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
@@ -2133,13 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
// Instruction details available in ARM DDI 0406B, A8.6.298.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
// Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- int sz, D, Vd, M, Vm;
-
- ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
- sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
- SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
-
+ int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 218eb97f3c..be9aa92f1a 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -120,6 +120,11 @@ struct SwVfpRegister {
ASSERT(is_valid());
return 1 << code_;
}
+ void split_code(int* vm, int* m) const {
+ ASSERT(is_valid());
+ *m = code_ & 0x1;
+ *vm = code_ >> 1;
+ }
int code_;
};
@@ -152,6 +157,11 @@ struct DwVfpRegister {
ASSERT(is_valid());
return 1 << code_;
}
+ void split_code(int* vm, int* m) const {
+ ASSERT(is_valid());
+ *m = (code_ & 0x10) >> 4;
+ *vm = code_ & 0x0F;
+ }
int code_;
};
@@ -966,6 +976,11 @@ class Assembler : public Malloced {
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
+ void vstr(const SwVfpRegister src,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
+
void vmov(const DwVfpRegister dst,
double imm,
const Condition cond = al);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 7e7e358c10..8b21558165 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -125,7 +125,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand(0));
+ __ mov(scratch3, Operand(0, RelocInfo::NONE));
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
// Calculate the location of the elements array and set elements array member
@@ -311,7 +311,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
@@ -481,6 +481,13 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // TODO(849): implement custom construct stub.
+ // Generate a copy of the generic stub for now.
+ Generate_JSConstructStubGeneric(masm);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -505,12 +512,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc.
- __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0));
+ __ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
@@ -840,7 +843,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5-r7, cp may be clobbered
// Clear the context before we push it when entering the JS frame.
- __ mov(cp, Operand(0));
+ __ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
__ EnterInternalFrame();
@@ -1027,7 +1030,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ str(r1, MemOperand(r2, -kPointerSize));
// Clear r1 to indicate a non-function being called.
- __ mov(r1, Operand(0));
+ __ mov(r1, Operand(0, RelocInfo::NONE));
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1057,7 +1060,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function;
__ tst(r1, r1);
__ b(ne, &function);
- __ mov(r2, Operand(0)); // expected arguments is 0 for CALL_NON_FUNCTION
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
@@ -1073,8 +1077,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET, ne);
@@ -1121,7 +1124,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push current limit and index.
__ bind(&okay);
__ push(r0); // limit
- __ mov(r1, Operand(0)); // initial index
+ __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
__ push(r1);
// Change context eagerly to get the right global object if necessary.
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
new file mode 100644
index 0000000000..fa93030ff4
--- /dev/null
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -0,0 +1,4688 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* lhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in cp.
+ Label gc;
+
+ // Pop the function info from the stack.
+ __ pop(r3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize,
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+ __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ Push(cp, r3);
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0));
+
+ // Set up the object header.
+ __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+ // Set up the fixed slots.
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the surrounding context.
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, r0);
+ __ pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into r3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+ __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, ip);
+ __ b(eq, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(r3);
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, expected_map_index);
+ __ cmp(r3, ip);
+ __ Assert(eq, message);
+ __ pop(r3);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ r0,
+ r1,
+ r2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). The zeros_
+// register is used as a scratch register. The source register is destroyed.
+// No GC occurs during this stub, so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+ ConvertToDoubleStub(Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return ConvertToDouble; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
+ // Subtract from 0 if source was negative.
+ __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ // We have -1, 0 or 1, which we treat specially. Register source_ contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ cmp(source_, Operand(1));
+ __ b(gt, &not_special);
+
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ static const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
+ // 1, 0 and -1 all have 0 for the second word.
+ __ mov(mantissa, Operand(0, RelocInfo::NONE));
+ __ Ret();
+
+ __ bind(&not_special);
+ // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ __ CountLeadingZeros(zeros_, source_, mantissa);
+ // Compute exponent and or it into the exponent register.
+ // We use mantissa as a scratch register here. Use a fudge factor to
+ // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
+ // that fit in the ARM's constant field.
+ int fudge = 0x400;
+ __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
+ __ add(mantissa, mantissa, Operand(fudge));
+ __ orr(exponent,
+ exponent,
+ Operand(mantissa, LSL, HeapNumber::kExponentShift));
+ // Shift up the source chopping the top bit off.
+ __ add(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+ __ mov(source_, Operand(source_, LSL, zeros_));
+ // Compute lower part of fraction (last 12 bits).
+ __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
+ // And the top (top 20 bits).
+ __ orr(exponent,
+ exponent,
+ Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
+ __ Ret();
+}
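
To make the target layout concrete, here is a host-side check (sketch, not V8 code; assumes IEEE 754 doubles) of the two words the stub produces for the special-cased value -1:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = -1.0;
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);
      uint32_t mantissa_word = static_cast<uint32_t>(bits);
      // Sign bit set, biased exponent 1023 (0x3FF), zero mantissa:
      printf("%08x %08x\n", exponent_word, mantissa_word);  // bff00000 00000000
      return 0;
    }
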
+
+
+// See comment for class.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+ Label max_negative_int;
+ // the_int_ has the answer, which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent. This test
+ // has the neat side effect of setting the flags according to the sign.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ cmp(the_int_, Operand(0x80000000u));
+ __ b(eq, &max_negative_int);
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same
+ // exponent.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ mov(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+ // Subtract from 0 if the value was negative.
+ __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
+ // We should be masking the implicit first digit of the mantissa away here,
+ // but it just ends up combining harmlessly with the last digit of the
+ // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
+ // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
+ __ str(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
+ __ str(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+ __ Ret();
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(ip, Operand(0, RelocInfo::NONE));
+ __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ Ret();
+}
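
The max-negative-int branch can be sanity-checked the same way; a sketch (again assuming IEEE 754 doubles):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = -2147483648.0;  // INT32_MIN cannot be negated in two's complement.
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      // Sign bit | biased exponent 1023 + 31 | all-zero stored mantissa.
      assert(bits == 0xC1E0000000000000ull);
      return 0;
    }
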
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ __ cmp(r0, r1);
+ __ b(ne, &not_identical);
+
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // The operands are identical and we know at least one of them is not a
+ // Smi, so neither of them is a Smi. If it's not a heap number, then
+ // return equal.
+ if (cc == lt || cc == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cc == lt) {
+ __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == gt) {
+ __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others, here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+ }
+
+ __ bind(&not_identical);
+}
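
The NaN test above works purely on the bit pattern; an equivalent host-side predicate (sketch, not V8 code):

    #include <cstdint>
    #include <cstring>

    static bool IsNaNBits(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      uint64_t exponent = (bits >> 52) & 0x7FF;              // bits 52..62
      uint64_t mantissa = bits & ((uint64_t(1) << 52) - 1);  // bits 0..51
      return exponent == 0x7FF && mantissa != 0;  // all-one exponent, non-zero mantissa
    }
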
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* lhs_not_nan,
+ Label* slow,
+ bool strict) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ Label rhs_is_smi;
+ __ tst(rhs, Operand(kSmiTagMask));
+ __ b(eq, &rhs_is_smi);
+
+ // Lhs is a Smi. Check whether the rhs is a heap number.
+ __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ // If rhs is r0 then there is already a non-zero value in it.
+ if (!rhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Lhs is a smi, rhs is a number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert lhs to a double in d7.
+ CpuFeatures::Scope scope(VFP3);
+ __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ __ push(lr);
+ // Convert lhs to a double in r2, r3.
+ __ mov(r7, Operand(lhs));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Load rhs to a double in r0, r1.
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ pop(lr);
+ }
+
+ // We now have both loaded as doubles but we can skip the lhs nan check
+ // since it's a smi.
+ __ jmp(lhs_not_nan);
+
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
+ __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs is not a number and rhs is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ // If lhs is r0 then there is already a non-zero value in it.
+ if (!lhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
+ } else {
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Rhs is a smi, lhs is a heap number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ // Convert rhs to a double in d6.
+ __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
+ } else {
+ __ push(lr);
+ // Load lhs to a double in r2, r3.
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ // Convert rhs to a double in r0, r1.
+ __ mov(r7, Operand(rhs));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+ // Fall through to both_loaded_as_doubles.
+}
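
The smi checks in this function lean on the 32-bit tagging scheme; a compact sketch (kSmiTag is 0 and kSmiTagSize is 1 on this port):

    #include <cstdint>

    static bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }  // tst reg, #kSmiTagMask
    static int32_t UntagSmi(int32_t tagged) { return tagged >> 1; }  // Operand(reg, ASR, kSmiTagSize)
    static int32_t TagSmi(int32_t value) { return value << 1; }      // low bit 0 marks a smi
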
+
+
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
+ Label one_is_nan, neither_is_nan;
+
+ __ Sbfx(r4,
+ lhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, lhs_not_nan);
+ __ mov(r4,
+ Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
+ __ b(ne, &one_is_nan);
+
+ __ bind(lhs_not_nan);
+ __ Sbfx(r4,
+ rhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, &neither_is_nan);
+ __ mov(r4,
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
+ __ b(eq, &neither_is_nan);
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in r0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
+
+ __ bind(&neither_is_nan);
+}
+
+
+// See comment at call site.
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
+
+ // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ __ cmp(rhs_mantissa, Operand(lhs_mantissa));
+ __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
+ // Return non-zero if the numbers are unequal.
+ __ Ret(ne);
+
+ __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
+ // If exponents are equal then return 0.
+ __ Ret(eq);
+
+ // Exponents are unequal. The only way we can return that the numbers
+ // are equal is if one is -0 and the other is 0. We already dealt
+ // with the case where both are -0 or both are 0.
+ // We start by seeing if the mantissas (that are equal) or the bottom
+ // 31 bits of the lhs exponent are non-zero. If so we return not
+ // equal.
+ __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ mov(r0, Operand(r4), LeaveCC, ne);
+ __ Ret(ne);
+ // Now they are equal if and only if the rhs exponent is zero in its
+ // low 31 bits.
+ __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
+ __ Ret();
+ } else {
+ // Call a native function to do a comparison between two non-NaNs.
+ // Call C routine that may not cause GC or other trouble.
+ __ push(lr);
+ __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
+ __ CallCFunction(ExternalReference::compare_doubles(), 4);
+ __ pop(pc); // Return.
+ }
+}
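
The 0/-0 exception handled above exists because the two values compare equal while sharing no bit pattern; a quick host demonstration (sketch):

    #include <cstdint>
    #include <cstring>

    int main() {
      double pz = 0.0, nz = -0.0;
      uint64_t a, b;
      memcpy(&a, &pz, sizeof(a));
      memcpy(&b, &nz, sizeof(b));
      // a == 0x0000000000000000 and b == 0x8000000000000000, yet pz == nz holds.
      return (pz == nz && a != b) ? 0 : 1;
    }
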
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ // If either operand is a JSObject or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into r2 and compare it with
+ // FIRST_JS_OBJECT_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &first_non_object);
+
+ // Return non-zero (r0 is not zero)
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r2, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+
+ __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r3, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(ne, &return_not_equal);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, not_heap_numbers);
+ __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ cmp(r2, r3);
+ __ b(ne, slow); // First was a heap number, second wasn't. Go to slow case.
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ }
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ // r2 is object type of rhs.
+ // Ensure that no non-strings have the symbol bit set.
+ Label object_test;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ tst(r2, Operand(kIsNotStringMask));
+ __ b(ne, &object_test);
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(eq, possible_strings);
+ __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, not_both_strings);
+ __ tst(r3, Operand(kIsSymbolMask));
+ __ b(eq, possible_strings);
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ mov(r0, Operand(NOT_EQUAL));
+ __ Ret();
+
+ __ bind(&object_test);
+ __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, not_both_strings);
+ __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, not_both_strings);
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ and_(r0, r2, Operand(r3));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ Ret();
+}
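
The and/and/eor tail above computes "equal iff both objects are undetectable" without a branch; the same computation in plain C++ (sketch):

    // Returns zero (the "equal" result) exactly when both bit fields have the
    // undetectable bit set, mirroring the three instructions before the Ret.
    static unsigned UndetectableCompareResult(unsigned bit_field_lhs,
                                              unsigned bit_field_rhs,
                                              unsigned undetectable_mask) {
      unsigned both = bit_field_lhs & bit_field_rhs & undetectable_mask;
      return both ^ undetectable_mask;
    }
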
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ __ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ BranchOnSmi(object, &is_smi);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ true);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ __ eor(scratch1, scratch1, Operand(scratch2));
+ __ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ __ ldr(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ BranchOnSmi(probe, not_found);
+ __ sub(scratch2, object, Operand(kHeapObjectTag));
+ __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ __ sub(probe, probe, Operand(kHeapObjectTag));
+ __ vldr(d1, probe, HeapNumber::kValueOffset);
+ __ vcmp(d0, d1);
+ __ vmrs(pc);
+ __ b(ne, not_found); // The cache did not contain this value.
+ __ b(&load_result_from_cache);
+ } else {
+ __ b(not_found);
+ }
+ }
+
+ __ bind(&is_smi);
+ Register scratch = scratch1;
+ __ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ Register probe = mask;
+ __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ cmp(object, probe);
+ __ b(ne, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ ldr(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native,
+ 1,
+ scratch1,
+ scratch2);
+}
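
The hash described in the comment above, restated as host code (sketch; mask is half the cache length minus one, as computed at the top of the function):

    #include <cstdint>
    #include <cstring>

    static uint32_t SmiHash(int32_t value, uint32_t mask) {
      return static_cast<uint32_t>(value) & mask;  // smis hash to their value
    }
    static uint32_t DoubleHash(double value, uint32_t mask) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      // Xor of the upper and lower words, per Heap::GetNumberStringCache.
      return (static_cast<uint32_t>(bits) ^
              static_cast<uint32_t>(bits >> 32)) & mask;
    }
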
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ ldr(r1, MemOperand(sp, 0));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
+ __ add(sp, sp, Operand(1 * kPointerSize));
+ __ Ret();
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ __ add(offset_, object_, Operand(offset_));
+ __ RecordWriteHelper(object_, offset_, scratch_);
+ __ Ret();
+}
+
+
+// On entry lhs_ and rhs_ are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ and_(r2, lhs_, Operand(rhs_));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_smis);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to lhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison. If VFP3 is supported the double values of the numbers have
+ // been loaded into d7 and d6. Otherwise, the double values have been loaded
+ // into r0, r1, r2, and r3.
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in d6 and d7, if
+ // VFP3 is supported, or in r0, r1, r2, and r3.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ bind(&lhs_not_nan);
+ CpuFeatures::Scope scope(VFP3);
+ Label no_nan;
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ vcmp(d7, d6);
+ __ vmrs(pc); // Move vector status bits to normal status bits.
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
+
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc_ == lt || cc_ == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
+ } else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds lhs_not_nan.
+ EmitNanCheck(masm, &lhs_not_nan, cc_);
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in rhs_ and lhs_.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ }
+
+ Label check_for_symbols;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // In this case r2 will contain the type of rhs_. Never falls through.
+ EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &flat_string_check);
+
+ __ bind(&check_for_symbols);
+ // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+ // symbols.
+ if (cc_ == eq && !strict_) {
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that r2 is the type of rhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs_,
+ rhs_,
+ r2,
+ r3,
+ r4,
+ r5);
+ // Never falls through to here.
+
+ __ bind(&slow);
+
+ __ Push(lhs_, rhs_);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_JS);
+}
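
The choice of ncr above is what makes NaN comparisons fail in both directions; a sketch of the call site's view (the builtin returns -1/0/1 and the caller then applies the original condition):

    enum { LESS = -1, EQUAL = 0, GREATER = 1 };

    // Preloading GREATER before a '<' or '<=' test (and LESS before '>' or
    // '>=') guarantees any ordering comparison involving NaN comes out false.
    static bool Evaluate(char cc, int result) {
      switch (cc) {
        case '<': return result < 0;
        case '>': return result > 0;
        default:  return result == 0;
      }
    }
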
+
+
+// This stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result;
+ Label not_heap_number;
+ Register scratch = r7;
+
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, ip);
+ __ b(&not_heap_number, ne);
+
+ __ sub(ip, tos_, Operand(kHeapObjectTag));
+ __ vldr(d1, ip, HeapNumber::kValueOffset);
+ __ vcmp(d1, 0.0);
+ __ vmrs(pc);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // Check if the value is 'null'.
+ // 'null' => false.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(tos_, ip);
+ __ b(&false_result, eq);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
+ __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
+ __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, eq);
+
+ // JavaScript object => true.
+ __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // Check for string
+ __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // String value => false iff empty, i.e., length is zero
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, Operand(0, RelocInfo::NONE));
+ __ Ret();
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. overflow). We branch into this code (to the not_smi label) if
+// the operands were not both Smi. The operands are in r0 and r1. In order
+// to call the C-implemented binary fp operation routines we need to end up
+// with the double precision floating point operands in r0 and r1 (for the
+// value in r1) and r2 and r3 (for the value in r0).
+void GenericBinaryOpStub::HandleBinaryOpSlowCases(
+ MacroAssembler* masm,
+ Label* not_smi,
+ Register lhs,
+ Register rhs,
+ const Builtins::JavaScript& builtin) {
+ Label slow, slow_reverse, do_the_call;
+ bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
+
+ ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+ Register heap_number_map = r6;
+
+ if (ShouldGenerateSmiCode()) {
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Smi-smi case (overflow).
+ // Since both are Smis there is no heap number to overwrite, so allocate.
+ // The new heap number is in r5. r3 and r7 are scratch.
+ __ AllocateHeapNumber(
+ r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
+
+ // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+ // using registers d7 and d6 for the double values.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt_f64_s32(d7, s15);
+ __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
+ __ mov(r7, Operand(rhs));
+ ConvertToDoubleStub stub1(r3, r2, r7, r9);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
+ __ mov(r7, Operand(lhs));
+ ConvertToDoubleStub stub2(r1, r0, r7, r9);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+ __ jmp(&do_the_call); // Tail call. No return.
+ }
+
+ // We branch here if at least one of r0 and r1 is not a Smi.
+ __ bind(not_smi);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // After this point we have the left hand side in r1 and the right hand side
+ // in r0.
+ if (lhs.is(r0)) {
+ __ Swap(r0, r1, ip);
+ }
+
+ // The type transition also calculates the answer.
+ bool generate_code_to_calculate_answer = true;
+
+ if (ShouldGenerateFPCode()) {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ GenerateTypeTransition(masm); // Tail call.
+ generate_code_to_calculate_answer = false;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (generate_code_to_calculate_answer) {
+ Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+ if (mode_ == NO_OVERWRITE) {
+ // In the case where there is no chance of an overwritable float we may
+ // as well do the allocation immediately while r0 and r1 are untouched.
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
+ }
+
+ // Move r0 to a double in r2-r3.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ mov(r5, Operand(r0)); // Overwrite this heap number.
+ }
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r0 to d7.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that second double is in r2 and r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ }
+ __ jmp(&finished_loading_r0);
+ __ bind(&r0_is_smi);
+ if (mode_ == OVERWRITE_RIGHT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi in r0 to double in d7.
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt_f64_s32(d7, s15);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ }
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
+ // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
+ Label r1_is_not_smi;
+ if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &r1_is_not_smi);
+ GenerateTypeTransition(masm); // Tail call.
+ }
+
+ __ bind(&finished_loading_r0);
+
+ // Move r1 to a double in r0-r1.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
+ __ bind(&r1_is_not_smi);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ if (mode_ == OVERWRITE_LEFT) {
+ __ mov(r5, Operand(r1)); // Overwrite this heap number.
+ }
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r1 to d6.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that first double is in r0 and r1.
+ __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ }
+ __ jmp(&finished_loading_r1);
+ __ bind(&r1_is_smi);
+ if (mode_ == OVERWRITE_LEFT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi in r1 to double in d6.
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ __ bind(&finished_loading_r1);
+ }
+
+ if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
+ __ bind(&do_the_call);
+ // If we are inlining the operation using VFP3 instructions for
+ // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+
+ if (Token::MUL == op_) {
+ __ vmul(d5, d6, d7);
+ } else if (Token::DIV == op_) {
+ __ vdiv(d5, d6, d7);
+ } else if (Token::ADD == op_) {
+ __ vadd(d5, d6, d7);
+ } else if (Token::SUB == op_) {
+ __ vsub(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+ __ sub(r0, r5, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ mov(pc, lr);
+ } else {
+ // If we did not inline the operation, then the arguments are in:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+
+ __ push(lr); // For later.
+ __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is callee
+ // save.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
+ #if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for the coprocessor, so
+ // we need to subtract the tag from r5.
+ __ sub(r4, r5, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
+ #else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ #endif
+ __ mov(r0, Operand(r5));
+ // And we are done.
+ __ pop(pc);
+ }
+ }
+ }
+
+ if (!generate_code_to_calculate_answer &&
+ !slow_reverse.is_linked() &&
+ !slow.is_linked()) {
+ return;
+ }
+
+ if (lhs.is(r0)) {
+ __ b(&slow);
+ __ bind(&slow_reverse);
+ __ Swap(r0, r1, ip);
+ }
+
+ heap_number_map = no_reg; // Don't use this any more from here on.
+
+ // We jump to here if something goes wrong (one param is not a number of any
+ // sort or new-space allocation fails).
+ __ bind(&slow);
+
+ // Push arguments to the stack.
+ __ Push(r1, r0);
+
+ if (Token::ADD == op_) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[4] : first argument
+
+ Label not_strings, not_string1, string1, string1_smi2;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &not_string1);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_string1);
+
+ // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1_smi2);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, r0, r2, r4, r5, r6, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ str(r2, MemOperand(sp, 0));
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &not_strings);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+ __ bind(&not_strings);
+ }
+
+ __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
+}
+
+
+// For bitwise ops where the inputs are not both Smis we try to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32 bit signed value. We truncate towards zero as required
+// by the ES spec. If this is the case we do the bitwise op and see if the
+// result is a Smi. If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
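+// For example, in JavaScript (5.9 | 0) == 5 and (-5.9 | 0) == -5: the
+// doubles are truncated towards zero before the bitwise op is applied.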
+// On entry the operands are in lhs and rhs. On exit the answer is in r0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ Label slow, result_not_a_smi;
+ Label rhs_is_smi, lhs_is_smi;
+ Label done_checking_rhs, done_checking_lhs;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ __ tst(lhs, Operand(kSmiTagMask));
+ __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
+ __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+ __ jmp(&done_checking_lhs);
+ __ bind(&lhs_is_smi);
+ __ mov(r3, Operand(lhs, ASR, 1));
+ __ bind(&done_checking_lhs);
+
+ __ tst(rhs, Operand(kSmiTagMask));
+ __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+ __ jmp(&done_checking_rhs);
+ __ bind(&rhs_is_smi);
+ __ mov(r2, Operand(rhs, ASR, 1));
+ __ bind(&done_checking_rhs);
+
+ ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
+
+ // r0 and r1: Original operands (Smi or heap numbers).
+ // r2 and r3: Signed int32 operands.
+ switch (op_) {
+ case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
+ case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
+ case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of writing
+ // the register as an unsigned int so we go to slow case if we hit this
+ // case.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi, &result_not_a_smi);
+ } else {
+ __ b(mi, &slow);
+ }
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default: UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a Smi: adding 0x40000000 sets
+ // the N flag exactly when the value lies outside -2^30 .. 2^30-1.
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ __ Ret();
+
+ Label have_to_allocate, got_a_heap_number;
+ __ bind(&result_not_a_smi);
+ switch (mode_) {
+ case OVERWRITE_RIGHT: {
+ __ tst(rhs, Operand(kSmiTagMask));
+ __ b(eq, &have_to_allocate);
+ __ mov(r5, Operand(rhs));
+ break;
+ }
+ case OVERWRITE_LEFT: {
+ __ tst(lhs, Operand(kSmiTagMask));
+ __ b(eq, &have_to_allocate);
+ __ mov(r5, Operand(lhs));
+ break;
+ }
+ case NO_OVERWRITE: {
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ break;
+ }
+ default: break;
+ }
+ __ bind(&got_a_heap_number);
+ // r2: Answer as signed int32.
+ // r5: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(r5));
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ if (op_ == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+
+ if (mode_ != NO_OVERWRITE) {
+ __ bind(&have_to_allocate);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ __ jmp(&got_a_heap_number);
+ }
+
+ // If all else failed then we go to the runtime system.
+ __ bind(&slow);
+ __ Push(lhs, rhs); // Restore stack.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// This function multiplies by a known small integer. It takes the known
+// int in a register for the default case, where it knows no good shift-
+// and-add trick, and it may deliver a result that still needs shifting.
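+// For example, for known_int == 10 the stub computes result = source * 5
+// and sets *required_shift to 2, so (result << 2) == source * 20, which
+// is the Smi-tagged value of source * 10.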
+static void MultiplyByKnownIntInStub(
+ MacroAssembler* masm,
+ Register result,
+ Register source,
+ Register known_int_register, // Smi tagged.
+ int known_int,
+ int* required_shift) { // Including Smi tag shift
+ switch (known_int) {
+ case 3:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 1;
+ break;
+ case 5:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 1;
+ break;
+ case 6:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 2;
+ break;
+ case 7:
+ __ rsb(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 9:
+ __ add(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 10:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 2;
+ break;
+ default:
+ ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
+ __ mul(result, source, known_int_register);
+ *required_shift = 0;
+ }
+}
+
+
+// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
+// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
+// Takes the sum of the digits base (mask + 1) repeatedly until we have a
+// number from 0 to mask. On exit the 'eq' condition flags are set if the
+// answer is exactly the mask.
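+// For example, for mask 3 and shift 2 (sum of base-4 digits, modulus 3),
+// 25 = b11001 reduces as 25 -> (25 & 3) + (25 >> 2) = 7 -> 3 + 1 = 4 ->
+// 0 + 1 = 1, and indeed 25 % 3 == 1.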
+void IntegerModStub::DigitSum(MacroAssembler* masm,
+ Register lhs,
+ int mask,
+ int shift,
+ Label* entry) {
+ ASSERT(mask > 0);
+ ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
+ Label loop;
+ __ bind(&loop);
+ __ and_(ip, lhs, Operand(mask));
+ __ add(lhs, ip, Operand(lhs, LSR, shift));
+ __ bind(entry);
+ __ cmp(lhs, Operand(mask));
+ __ b(gt, &loop);
+}
+
+
+void IntegerModStub::DigitSum(MacroAssembler* masm,
+ Register lhs,
+ Register scratch,
+ int mask,
+ int shift1,
+ int shift2,
+ Label* entry) {
+ ASSERT(mask > 0);
+ ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
+ Label loop;
+ __ bind(&loop);
+ __ bic(scratch, lhs, Operand(mask));
+ __ and_(ip, lhs, Operand(mask));
+ __ add(lhs, ip, Operand(lhs, LSR, shift1));
+ __ add(lhs, lhs, Operand(scratch, LSR, shift2));
+ __ bind(entry);
+ __ cmp(lhs, Operand(mask));
+ __ b(gt, &loop);
+}
+
+
+// Splits the number into two halves (bottom half has shift bits). The top
+// half is subtracted from the bottom half. If the result is negative then
+// rhs is added.
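+// For example, for shift 2 and rhs 5: 14 == 4 * 3 + 2, and since 4 == -1
+// (mod 5) we get 14 % 5 == 2 - 3 == -1, which the final add turns into 4.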
+void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
+ Register lhs,
+ int shift,
+ int rhs) {
+ int mask = (1 << shift) - 1;
+ __ and_(ip, lhs, Operand(mask));
+ __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
+ __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
+}
+
+
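+// Reduces lhs, which is known to be at most max, modulo denominator by
+// conditionally subtracting denominator * 2^k for decreasing values of k.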
+void IntegerModStub::ModReduce(MacroAssembler* masm,
+ Register lhs,
+ int max,
+ int denominator) {
+ int limit = denominator;
+ while (limit * 2 <= max) limit *= 2;
+ while (limit >= denominator) {
+ __ cmp(lhs, Operand(limit));
+ __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
+ limit >>= 1;
+ }
+}
+
+
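+// Reassembles the final answer from the bits below the power-of-2 part of
+// the modulus (mask_bits) and the result for the odd part (sum_of_digits),
+// undoing the shift applied to the lhs on entry.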
+void IntegerModStub::ModAnswer(MacroAssembler* masm,
+ Register result,
+ Register shift_distance,
+ Register mask_bits,
+ Register sum_of_digits) {
+ __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
+ __ Ret();
+}
+
+
+// See comment for class.
+void IntegerModStub::Generate(MacroAssembler* masm) {
+ __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
+ __ bic(odd_number_, odd_number_, Operand(1));
+ __ mov(odd_number_, Operand(odd_number_, LSL, 1));
+ // We now have (odd_number_ - 1) * 2 in the register.
+ // Build a switch out of branches instead of data because it avoids
+ // having to teach the assembler about intra-code-object pointers
+ // that are not in relative branch instructions.
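+ // For example, for modulus 3 the odd_number_ register now holds
+ // (3 - 1) * 2 == 4; pc reads 8 ahead of the add below, so the add
+ // targets the first branch slot, b(&mod3), 12 bytes further on.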
+ Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
+ Label mod21, mod23, mod25;
+ { Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ add(pc, pc, Operand(odd_number_));
+ // When you read pc it is always 8 ahead, but when you write it you always
+ // write the actual value. So we put in two nops to take up the slack.
+ __ nop();
+ __ nop();
+ __ b(&mod3);
+ __ b(&mod5);
+ __ b(&mod7);
+ __ b(&mod9);
+ __ b(&mod11);
+ __ b(&mod13);
+ __ b(&mod15);
+ __ b(&mod17);
+ __ b(&mod19);
+ __ b(&mod21);
+ __ b(&mod23);
+ __ b(&mod25);
+ }
+
+ // For each denominator we find a multiple that is almost only ones
+ // when expressed in binary. Then we do the sum-of-digits trick for
+ // that number. If the multiple is not 1 then we have to do a little
+ // more work afterwards to get the answer into the range 0 to
+ // denominator-1.
+ DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
+ __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
+ ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
+ __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
+ ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
+ ModReduce(masm, lhs_, 0x3f, 11);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
+ ModReduce(masm, lhs_, 0xff, 13);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
+ __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
+ ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
+ ModReduce(masm, lhs_, 0xff, 19);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
+ ModReduce(masm, lhs_, 0x3f, 21);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
+ ModReduce(masm, lhs_, 0xff, 23);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
+ ModReduce(masm, lhs_, 0x7f, 25);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ // lhs_ : x
+ // rhs_ : y
+ // r0 : result
+
+ Register result = r0;
+ Register lhs = lhs_;
+ Register rhs = rhs_;
+
+ // This code can't cope with other register allocations yet.
+ ASSERT(result.is(r0) &&
+ ((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0))));
+
+ Register smi_test_reg = r7;
+ Register scratch = r9;
+
+ // All ops need to know whether we are dealing with two Smis. Set up
+ // smi_test_reg to tell us that.
+ if (ShouldGenerateSmiCode()) {
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ Label not_smi;
+ // Fast path.
+ if (ShouldGenerateSmiCode()) {
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
+ }
+ HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
+ break;
+ }
+
+ case Token::SUB: {
+ Label not_smi;
+ // Fast path.
+ if (ShouldGenerateSmiCode()) {
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ if (lhs.is(r1)) {
+ __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
+ } else {
+ __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
+ }
+ }
+ HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
+ break;
+ }
+
+ case Token::MUL: {
+ Label not_smi, slow;
+ if (ShouldGenerateSmiCode()) {
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ Register scratch2 = smi_test_reg;
+ smi_test_reg = no_reg;
+ __ b(ne, &not_smi);
+ // Remove tag from one operand (but keep sign), so that result is Smi.
+ __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
+ // Do the multiplication.
+ // scratch = lower 32 bits of ip * lhs.
+ __ smull(scratch, scratch2, lhs, ip);
+ // Go slow on overflow. smull does not set the overflow flag, so compare
+ // the high word with the sign-extended low word instead.
+ __ mov(ip, Operand(scratch, ASR, 31));
+ // No overflow if higher 33 bits are identical.
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &slow);
+ // Go slow on zero result to handle -0.
+ __ tst(scratch, Operand(scratch));
+ __ mov(result, Operand(scratch), LeaveCC, ne);
+ __ Ret(ne);
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ add(scratch2, rhs, Operand(lhs), SetCC);
+ __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
+ // Slow case. We fall through here if we multiplied a negative number
+ // with 0, because that would mean we should produce -0.
+ __ bind(&slow);
+ }
+ HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
+ break;
+ }
+
+ case Token::DIV:
+ case Token::MOD: {
+ Label not_smi;
+ if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
+ Label lhs_is_unsuitable;
+ __ BranchOnNotSmi(lhs, &not_smi);
+ if (IsPowerOf2(constant_rhs_)) {
+ if (op_ == Token::MOD) {
+ __ and_(rhs,
+ lhs,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
+ SetCC);
+ // We now have the answer, but if the input was negative we also
+ // have the sign bit. Our work is done if the result is
+ // positive or zero:
+ if (!rhs.is(r0)) {
+ __ mov(r0, rhs, LeaveCC, pl);
+ }
+ __ Ret(pl);
+ // A mod of a negative left hand side must return a negative number.
+ // Unfortunately if the answer is 0 then we must return -0. And we
+ // already optimistically trashed rhs so we may need to restore it.
+ __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
+ // Next two instructions are conditional on the answer being -0.
+ __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
+ __ b(eq, &lhs_is_unsuitable);
+ // We need to subtract the divisor. E.g. -3 % 4 == -3.
+ __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
+ } else {
+ ASSERT(op_ == Token::DIV);
+ __ tst(lhs,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
+ __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
+ int shift = 0;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ d >>= 1;
+ shift++;
+ }
+ __ mov(r0, Operand(lhs, LSR, shift));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ }
+ } else {
+ // Not a power of 2.
+ __ tst(lhs, Operand(0x80000000u));
+ __ b(ne, &lhs_is_unsuitable);
+ // Find a fixed point reciprocal of the divisor so we can divide by
+ // multiplying.
+ double divisor = 1.0 / constant_rhs_;
+ int shift = 32;
+ double scale = 4294967296.0; // 1 << 32.
+ uint32_t mul;
+ // Maximise the precision of the fixed point reciprocal.
+ while (true) {
+ mul = static_cast<uint32_t>(scale * divisor);
+ if (mul >= 0x7fffffff) break;
+ scale *= 2.0;
+ shift++;
+ }
+ mul++;
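+ // For example, for constant_rhs_ == 5 the loop above settles on
+ // shift == 34 and mul == 0xCCCCCCCD, the usual fixed-point "magic
+ // number" for division by 5.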
+ Register scratch2 = smi_test_reg;
+ smi_test_reg = no_reg;
+ __ mov(scratch2, Operand(mul));
+ __ umull(scratch, scratch2, scratch2, lhs);
+ __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
+ // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
+ // rhs is still the known rhs. rhs is Smi tagged.
+ // lhs is still the unknown lhs. lhs is Smi tagged.
+ int required_scratch_shift = 0; // Including the Smi tag shift of 1.
+ // scratch = scratch2 * rhs.
+ MultiplyByKnownIntInStub(masm,
+ scratch,
+ scratch2,
+ rhs,
+ constant_rhs_,
+ &required_scratch_shift);
+ // scratch << required_scratch_shift is now the Smi tagged rhs *
+ // (lhs / rhs) where / indicates integer division.
+ if (op_ == Token::DIV) {
+ __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
+ __ b(ne, &lhs_is_unsuitable); // There was a remainder.
+ __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
+ } else {
+ ASSERT(op_ == Token::MOD);
+ __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
+ }
+ }
+ __ Ret();
+ __ bind(&lhs_is_unsuitable);
+ } else if (op_ == Token::MOD &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS) {
+ // Do generate a bit of smi code for modulus even though the default for
+ // modulus is not to do it: since the ARM processor has no hardware
+ // support for modulus, checking for smis makes sense. We can handle
+ // 1 to 25 times any power of 2. This covers over half the numbers from
+ // 1 to 100, including all of the first 25. (Actually the constants < 10
+ // are handled above by reciprocal multiplication. We only get here for
+ // those cases if the right hand side is not a constant, or for cases
+ // like 192, which is 3 * 2^6 and ends up in the 3 case of the integer
+ // mod stub.)
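+ // For example, for a tagged rhs of Smi(40) == 16 * 5, shift_distance
+ // below becomes 4, odd_number becomes 5, and mask_bits keeps the low
+ // four bits of the tagged lhs.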
+ Label slow;
+ Label not_power_of_2;
+ ASSERT(!ShouldGenerateSmiCode());
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ // Check for two positive smis.
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that rhs is a power of two and not zero.
+ Register mask_bits = r3;
+ __ sub(scratch, rhs, Operand(1), SetCC);
+ __ b(mi, &slow);
+ __ and_(mask_bits, rhs, Operand(scratch), SetCC);
+ __ b(ne, &not_power_of_2);
+ // Calculate power of two modulus.
+ __ and_(result, lhs, Operand(scratch));
+ __ Ret();
+
+ __ bind(&not_power_of_2);
+ __ eor(scratch, scratch, Operand(mask_bits));
+ // At least two bits are set in the modulus. The high one(s) are in
+ // mask_bits and the low one is scratch + 1.
+ __ and_(mask_bits, scratch, Operand(lhs));
+ Register shift_distance = scratch;
+ scratch = no_reg;
+
+ // The rhs consists of a power of 2 multiplied by some odd number.
+ // The power-of-2 part we handle by putting the corresponding bits
+ // from the lhs in the mask_bits register, and the power in the
+ // shift_distance register. Shift distance is never 0 due to Smi
+ // tagging.
+ __ CountLeadingZeros(r4, shift_distance, shift_distance);
+ __ rsb(shift_distance, r4, Operand(32));
+
+ // Now we need to find out what the odd number is. The last bit is
+ // always 1.
+ Register odd_number = r4;
+ __ mov(odd_number, Operand(rhs, LSR, shift_distance));
+ __ cmp(odd_number, Operand(25));
+ __ b(gt, &slow);
+
+ IntegerModStub stub(
+ result, shift_distance, odd_number, mask_bits, lhs, r5);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
+
+ __ bind(&slow);
+ }
+ HandleBinaryOpSlowCases(
+ masm,
+ &not_smi,
+ lhs,
+ rhs,
+ op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label slow;
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ Register scratch2 = smi_test_reg;
+ smi_test_reg = no_reg;
+ switch (op_) {
+ case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
+ case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
+ case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+ __ mov(result, Operand(lhs, ASR, scratch2));
+ // Smi tag result.
+ __ bic(result, result, Operand(kSmiTagMask));
+ break;
+ case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
+ __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+ __ mov(scratch, Operand(scratch, LSR, scratch2));
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
+ __ tst(scratch, Operand(0xc0000000));
+ __ b(ne, &slow);
+ // Smi tag result.
+ __ mov(result, Operand(scratch, LSL, kSmiTagSize));
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
+ __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+ __ mov(scratch, Operand(scratch, LSL, scratch2));
+ // Check that the signed result fits in a Smi.
+ __ add(scratch2, scratch, Operand(0x40000000), SetCC);
+ __ b(mi, &slow);
+ __ mov(result, Operand(scratch, LSL, kSmiTagSize));
+ break;
+ default: UNREACHABLE();
+ }
+ __ Ret();
+ __ bind(&slow);
+ HandleNonSmiBitwiseOp(masm, lhs, rhs);
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+ // This code should be unreachable.
+ __ stop("Unreachable");
+
+ // Generate an unreachable reference to the DEFAULT stub so that it can be
+ // found at the end of this stub when clearing ICs at GC.
+ // TODO(kaznacheev): Check performance impact and get rid of this.
+ if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+ GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+ __ CallStub(&uninit);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(r1, r0);
+
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
+ __ Push(r2, r1, r0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Argument is a number and is on the stack and in r0.
+ Label runtime_call;
+ Label input_not_smi;
+ Label loaded;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Load argument and check if it is a smi.
+ __ BranchOnNotSmi(r0, &input_not_smi);
+
+ CpuFeatures::Scope scope(VFP3);
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &runtime_call,
+ true);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+
+ __ bind(&loaded);
+ // r2 = low 32 bits of double value
+ // r3 = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ eor(r1, r2, Operand(r3));
+ __ eor(r1, r1, Operand(r1, ASR, 16));
+ __ eor(r1, r1, Operand(r1, ASR, 8));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
+
+ // r2 = low 32 bits of double value.
+ // r3 = high 32 bits of double value.
+ // r1 = TranscendentalCache::hash(double value).
+ __ mov(r0,
+ Operand(ExternalReference::transcendental_cache_array_address()));
+ // r0 points to cache array.
+ __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ b(eq, &runtime_call);
+
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+ // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(r0, r0, Operand(r1, LSL, 2));
+ // Check if the cache entry matches: the double is stored as a uint32_t[2].
+ __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ b(ne, &runtime_call);
+ __ cmp(r3, r5);
+ __ b(ne, &runtime_call);
+ // Cache hit. Load result, pop argument and return.
+ __ mov(r0, Operand(r6));
+ __ pop();
+ __ Ret();
+ }
+
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Do tail-call to runtime routine. Runtime routines expect at least one
+ // argument, so give it a Smi.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ push(r0);
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+
+ __ StubReturn(1);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
+
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ if (negative_zero_ == kStrictNegativeZero) {
+ // If we have to check for zero, then we can check for the max negative
+ // smi while we are at it.
+ __ bic(ip, r0, Operand(0x80000000), SetCC);
+ __ b(eq, &slow);
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+ __ StubReturn(1);
+ } else {
+ // The value of the expression is a smi and 0 is OK for -0. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
+ __ StubReturn(1, vc);
+ // We don't have to reverse the optimistic neg since the only case
+ // where we fall through is the minimum negative Smi, which is the case
+ // where the neg leaves the register unchanged.
+ __ jmp(&slow); // Go slow on max negative Smi.
+ }
+
+ __ bind(&try_float);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
+ __ b(ne, &slow);
+
+ // Convert the heap number in r0 to an untagged integer in r1.
+ __ ConvertToInt32(r0, r1, r2, r3, &slow);
+
+ // Do the bitwise operation (move negated) and check if the result
+ // fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(&done);
+
+ __ bind(&try_float);
+ if (overwrite_ != UNARY_OVERWRITE) {
+ // Allocate a fresh heap number, but don't overwrite r0 until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in r0.
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ mov(r0, Operand(r2));
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ push(r0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // r0 holds the exception.
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+
+ // Restore the next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(r2);
+ __ str(r2, MemOperand(r3));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ __ cmp(fp, Operand(0, RelocInfo::NONE));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ pop(pc);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ ldr(r2, MemOperand(sp, kStateOffset));
+ __ cmp(r2, Operand(StackHandler::ENTRY));
+ __ b(eq, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ ldr(sp, MemOperand(sp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to the next handler past the current
+ // ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(r2);
+ __ str(r2, MemOperand(r3));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(r0, Operand(false));
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(r2));
+ }
+
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // lr
+
+ // Discard handler state (r2 is not used) and restore frame pointer.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ __ cmp(fp, Operand(0, RelocInfo::NONE));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ pop(pc);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate,
+ int frame_alignment_skew) {
+ // r0: result parameter for PerformGC, if any
+ // r4: number of arguments including receiver (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+ // r6: pointer to the first argument (C callee-saved)
+
+ if (do_gc) {
+ // Passing r0.
+ __ PrepareCallCFunction(1, r1);
+ __ CallCFunction(ExternalReference::perform_gc_function(), 1);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate) {
+ __ mov(r0, Operand(scope_depth));
+ __ ldr(r1, MemOperand(r0));
+ __ add(r1, r1, Operand(1));
+ __ str(r1, MemOperand(r0));
+ }
+
+ // Call C built-in.
+ // r0 = argc, r1 = argv
+ __ mov(r0, Operand(r4));
+ __ mov(r1, Operand(r6));
+
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+#if defined(V8_HOST_ARCH_ARM)
+ if (FLAG_debug_code) {
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ sub(r2, sp, Operand(frame_alignment_skew));
+ __ tst(r2, Operand(frame_alignment_mask));
+ __ b(eq, &alignment_as_expected);
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ __ stop("Unexpected alignment");
+ __ bind(&alignment_as_expected);
+ }
+ }
+#endif
+
+ // Just before the call (jump) below, lr is pushed, so the actual stack
+ // alignment at the call is the current skew plus one pointer size.
+ int alignment_before_call =
+ (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
+ if (alignment_before_call > 0) {
+ // Push until the alignment before the call is met.
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ for (int i = alignment_before_call;
+ (i & frame_alignment_mask) != 0;
+ i += kPointerSize) {
+ __ push(r2);
+ }
+ }
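+ // For example, with frame_alignment == 8 and frame_alignment_skew == 0,
+ // one padding word is pushed so that sp is again 8-byte aligned once lr
+ // has been pushed below.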
+
+ // TODO(1242173): To let the GC traverse the return address of the exit
+ // frames, we need to know where the return address is. Right now,
+ // we push it on the stack to be able to find it again, but we never
+ // restore from it in case of changes, which makes it impossible to
+ // support moving the C entry code stub. This should be fixed, but currently
+ // this is OK because the CEntryStub gets generated so early in the V8 boot
+ // sequence that it is not moving ever.
+ masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
+ masm->push(lr);
+ masm->Jump(r5);
+
+ // Restore sp back to before aligning the stack.
+ if (alignment_before_call > 0) {
+ __ add(sp, sp, Operand(alignment_before_call));
+ }
+
+ if (always_allocate) {
+ // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
+ // though (contain the result).
+ __ mov(r2, Operand(scope_depth));
+ __ ldr(r3, MemOperand(r2));
+ __ sub(r3, r3, Operand(1));
+ __ str(r3, MemOperand(r2));
+ }
+
+ // Check for a failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ // Lower 2 bits of r2 are 0 iff r0 has failure tag.
+ __ add(r2, r0, Operand(1));
+ __ tst(r2, Operand(kFailureTagMask));
+ __ b(eq, &failure_returned);
+
+ // Exit C frame and return.
+ // r0:r1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ __ LeaveExitFrame();
+
+ // Check whether we should retry or throw an exception.
+ Label retry;
+ __ bind(&failure_returned);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ b(eq, &retry);
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ b(eq, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r3, MemOperand(ip));
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ ldr(r0, MemOperand(ip));
+ __ str(r3, MemOperand(ip));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ cmp(r0, Operand(Factory::termination_exception()));
+ __ b(eq, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // r0: number of arguments including receiver
+ // r1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // Result returned in r0 or r0+r1 by default.
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // r4: number of arguments (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+ // r6: pointer to first argument (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false,
+ -kPointerSize);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false,
+ 0);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true,
+ kPointerSize);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // [sp+0]: argv
+
+ Label invoke, exit;
+
+ // Called from C, so do not pop argc and args on exit (preserve sp)
+ // No need to save register-passed args
+ // Save callee-saved registers (incl. cp and fp), sp, and lr
+ __ stm(db_w, sp, kCalleeSaved | lr.bit());
+
+ // Get address of argv, see stm above.
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ mov(r7, Operand(Smi::FromInt(marker)));
+ __ mov(r6, Operand(Smi::FromInt(marker)));
+ __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ ldr(r5, MemOperand(r5));
+ __ Push(r8, r7, r6, r5);
+
+ // Setup frame pointer for the frame to be pushed.
+ __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Call a faked try-block that does the invoke.
+ __ bl(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(ip));
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r0-r4, r5-r7 are available.
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bl(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r5, MemOperand(ip));
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r5, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(ip, Operand(entry));
+ }
+ __ ldr(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline. We don't use the double underscore
+ // macro for the add instruction because we don't want the coverage tool
+ // inserting instructions here after we read the pc.
+ __ mov(lr, Operand(pc));
+ masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Unlink this frame from the handler chain. When reading the
+ // address of the next handler, there is no need to use the address
+ // displacement since the current stack pointer (sp) points directly
+ // to the stack handler.
+ __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r3, MemOperand(ip));
+ // No need to restore registers
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize));
+
+ __ bind(&exit); // r0 holds result
+ // Restore the top frame descriptors from the stack.
+ __ pop(r3);
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ str(r3, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Restore callee-saved registers and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
+}
+
+
+// This stub performs an instanceof, calling the builtin function if
+// necessary. Uses r1 for the object, r0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - slow case for smis (we may need to throw an exception
+ // depending on the rhs).
+ Label slow, loop, is_instance, is_not_instance;
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+ __ BranchOnSmi(r0, &slow);
+
+ // Check that the left hand is a JS object and put map in r3.
+ __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+ __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(gt, &slow);
+
+ // Get the prototype of the function (r4 is result, r2 is scratch).
+ __ ldr(r1, MemOperand(sp, 0));
+ // r1 is function, r3 is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr));
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(r1, r4, r2, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ BranchOnSmi(r4, &slow);
+ __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+ __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(gt, &slow);
+
+ __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
+
+ // Register mapping: r3 is object map and r4 is function prototype.
+ // Get prototype of object into r2.
+ __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ bind(&loop);
+ __ cmp(r2, Operand(r4));
+ __ b(eq, &is_instance);
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r2, ip);
+ __ b(eq, &is_not_instance);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&is_not_instance);
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr)); // Return.
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ BranchOnNotSmi(r1, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register r0. Use unsigned comparison to get negative
+ // check for free.
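+ // (A negative smi has the sign bit set and therefore compares as a very
+ // large unsigned value, so a single unsigned compare covers both the
+ // negative and the out-of-range case.)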
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the stack and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ Jump(lr);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the adaptor frame and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ Jump(lr);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(r1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ ldr(r1, MemOperand(sp, 0));
+ __ b(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r1, MemOperand(sp, 0));
+ __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ b(eq, &add_arguments_object);
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+ __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(
+ r1,
+ r0,
+ r2,
+ r3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(r4, offset));
+
+ // Copy the JS object part.
+ __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ b(eq, &done);
+
+ // Get the parameters pointer from the stack.
+ __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+ __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Setup r4 to point to the first array slot.
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement r2 with kPointerSize on each iteration; pre-decrementing
+ // skips the receiver.
+ __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
+ // Post-increment r4 with kPointerSize on each iteration.
+ __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
+ __ sub(r1, r1, Operand(1));
+ __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ b(ne, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
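+
+// The allocation size computed in the try_allocate block above, in words
+// (sketch): an empty arguments object needs only the boilerplate-sized
+// JSObject, otherwise the elements FixedArray is carved out of the same
+// new-space chunk by the single AllocateInNewSpace call:
+//
+//   size = Heap::kArgumentsObjectSize / kPointerSize;
+//   if (length != 0) {
+//     size += FixedArray::kHeaderSize / kPointerSize + length;
+//   }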
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Jump straight to the runtime system if native RegExp support was not
+ // selected at compile time, or if entering generated regexp code has been
+ // disabled by the runtime flag.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 0 * kPointerSize;
+ static const int kPreviousIndexOffset = 1 * kPointerSize;
+ static const int kSubjectOffset = 2 * kPointerSize;
+ static const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the contents of these registers are safe to use after the call.
+ Register subject = r4;
+ Register regexp_data = r5;
+ Register last_match_info_elements = r6;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(r0, Operand(address_of_regexp_stack_memory_size));
+ __ ldr(r0, MemOperand(r0, 0));
+ __ tst(r0, Operand(r0));
+ __ b(eq, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+ __ b(ne, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ tst(regexp_data, Operand(kSmiTagMask));
+ __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+ __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
+ __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ b(ne, &runtime);
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector buffer.
+ __ ldr(r2,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(r2, r2, Operand(2)); // r2 was a smi.
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ b(hi, &runtime);
+
+ // r2: Number of capture registers
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the second argument is a string.
+ __ ldr(subject, MemOperand(sp, kSubjectOffset));
+ __ tst(subject, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ Condition is_string = masm->IsObjectStringType(subject, r0);
+ __ b(NegateCondition(is_string), &runtime);
+ // Get the length of the string to r3.
+ __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
+
+ // r2: Number of capture registers
+ // r3: Length of subject string as a smi
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &runtime);
+ __ cmp(r3, Operand(r0));
+ __ b(ls, &runtime);
+
+ // r2: Number of capture registers
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the fourth argument is a JSArray object.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+ __ b(ne, &runtime);
+ // Check that the JSArray is in fast case.
+ __ ldr(last_match_info_elements,
+ FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ldr(r0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
+ __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
+ __ b(gt, &runtime);
+
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string;
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ // First check for flat string.
+ __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ b(eq, &seq_string);
+
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
+ __ b(ne, &runtime);
+ __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
+ __ cmp(r0, r1);
+ __ b(ne, &runtime);
+ __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ // Is first part a flat string?
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r0, Operand(kStringRepresentationMask));
+ __ b(nz, &runtime);
+
+ __ bind(&seq_string);
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // r0: Instance type of subject string
+ STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Find the code object based on the assumptions above.
+ __ and_(r0, r0, Operand(kStringEncodingMask));
+ __ mov(r3, Operand(r0, ASR, 2), SetCC);
+ __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ CompareObjectType(r7, r0, r0, CODE_TYPE);
+ __ b(ne, &runtime);
+
+ // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r7: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Load used arguments before starting to push arguments for the call to the
+ // native RegExp code, to avoid handling a changing stack height.
+ __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+
+ // r1: previous index
+ // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r7: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ push(lr);
+ __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
+
+ // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
+ __ mov(r0, Operand(1));
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+
+ // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
+ __ mov(r0, Operand(address_of_regexp_stack_memory_address));
+ __ ldr(r0, MemOperand(r0, 0));
+ __ mov(r2, Operand(address_of_regexp_stack_memory_size));
+ __ ldr(r2, MemOperand(r2, 0));
+ __ add(r0, r0, Operand(r2));
+ __ str(r0, MemOperand(sp, 1 * kPointerSize));
+
+ // Argument 5 (sp[0]): static offsets vector buffer.
+ __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
+ __ str(r0, MemOperand(sp, 0 * kPointerSize));
+
+ // For arguments 4 and 3 get string length, calculate start of string data and
+ // calculate the shift of the index (0 for ASCII and 1 for two byte).
+ __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ eor(r3, r3, Operand(1));
+ // Argument 4 (r3): End of string data
+ // Argument 3 (r2): Start of string data
+ __ add(r2, r9, Operand(r1, LSL, r3));
+ __ add(r3, r9, Operand(r0, LSL, r3));
+
+ // Argument 2 (r1): Previous index.
+ // Already there
+
+ // Argument 1 (r0): Subject string.
+ __ mov(r0, subject);
+
+ // Locate the code entry and call it.
+ __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r7, kRegExpExecuteArguments);
+ __ pop(lr);
+
+ // r0: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+
+ // Check the result.
+ Label success;
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ b(eq, &success);
+ Label failure;
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ b(eq, &failure);
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not exception, it can only be retry. Handle that in the runtime system.
+ __ b(ne, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerun the RegExp to get the stack overflow exception.
+ __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r0, MemOperand(r0, 0));
+ __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ ldr(r1, MemOperand(r1, 0));
+ __ cmp(r0, r1);
+ __ b(eq, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(r0, Operand(Factory::null_value()));
+ __ add(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ ldr(r1,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(r1, r1, Operand(2)); // r1 was a smi.
+
+ // r1: number of capture registers
+ // r4: subject string
+ // Store the capture count.
+ __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
+ __ str(r2, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
+ __ str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+ __ str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ mov(r3, last_match_info_elements);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(r2, Operand(address_of_static_offsets_vector));
+
+ // r1: number of capture registers
+ // r2: offsets vector
+ Label next_capture, done;
+ // The capture register counter starts at the number of capture registers
+ // and counts down, stopping when it wraps past zero.
+ __ add(r0,
+ last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ bind(&next_capture);
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ b(mi, &done);
+ // Read the value from the static offsets vector buffer.
+ __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
+ // Store the smi value in the last match info.
+ __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+ __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ add(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
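+
+// For reference, the native regexp code above is invoked like a C function
+// taking seven arguments (sketch; parameter names are descriptive only):
+//
+//   int result = code(subject,          // r0
+//                     previous_index,   // r1, untagged
+//                     start_of_data,    // r2
+//                     end_of_data,      // r3
+//                     offsets_vector,   // sp[0]
+//                     stack_area_top,   // sp[4]
+//                     direct_call);     // sp[8], 1 = called from JS code
+//
+// and returns SUCCESS, FAILURE, EXCEPTION or RETRY as handled above.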
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean), check for
+ // this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ Label receiver_is_value, receiver_is_js_object;
+ __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ BranchOnSmi(r1, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(r1);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ LeaveInternalFrame();
+ __ str(r0, MemOperand(sp, argc_ * kPointerSize));
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ BranchOnSmi(r1, &slow);
+ // Get the map of the function object.
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+
+ // Fast-case: Invoke the function now.
+ // r1: pushed function
+ ParameterCount actual(argc_);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ mov(r0, Operand(argc_)); // Setup the number of arguments.
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
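+
+// Receiver handling in the stub above, in outline (sketch):
+//
+//   if (receiver->IsSmi() || !receiver->IsJSObject()) {
+//     receiver = ToObject(receiver);  // boxed via Builtins::TO_OBJECT
+//   }
+//   // then invoke the function, or fall through to CALL_NON_FUNCTION.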
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
+ const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == eq || cc_ == ne)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s%s%s",
+ cc_name,
+ lhs_name,
+ rhs_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
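+
+// Example (sketch): a strict equality stub with lhs in r1, rhs in r0, NaN
+// handling and number compare included would be named
+//
+//   "CompareStub_EQ_r1_r0_STRICT"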
+
+
+int CompareStub::MinorKey() {
+ // Encode the parameters in a unique 16 bit value. To avoid duplicate stubs
+ // the never-NaN-NaN condition is only taken into account if the condition
+ // is equals.
+ ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+ | RegisterField::encode(lhs_.is(r0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
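+
+// Note on ConditionField: ARM condition codes occupy the top four bits of an
+// instruction word, so cc >> 28 is a 4-bit value, e.g. (sketch):
+//
+//   eq = 0 << 28  ->  0        ge = 10 << 28  ->  10
+//   ne = 1 << 28  ->  1        lt = 11 << 28  ->  11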
+
+
+ // -------------------------------------------------------------------------
+ // StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ BranchOnSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ tst(result_, Operand(kIsNotStringMask));
+ __ b(ne, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ BranchOnNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
+ __ cmp(ip, Operand(scratch_));
+ __ b(ls, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
+
+ // Handle non-flat strings.
+ __ tst(result_, Operand(kIsConsStringMask));
+ __ b(eq, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(ne, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(nz, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ tst(result_, Operand(kStringEncodingMask));
+ __ b(nz, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ add(scratch_, object_, Operand(scratch_));
+ __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
+ __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ bind(&exit_);
+}
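+
+// The fast path above, in outline (sketch):
+//
+//   if (object is not a string or index is not a smi) -> out-of-line cases
+//   if (index >= string->length()) -> index_out_of_range
+//   // one level of cons string is looked through if its second part is the
+//   // empty string; anything else goes to the runtime
+//   code = ascii ? byte_at(index) : halfword_at(index);
+//   result = Smi::FromInt(code);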
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ true);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Move(scratch_, r0);
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ BranchOnNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Move(result_, r0);
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ tst(code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ b(nz, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point the code register contains a smi-tagged ascii char code.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(eq, &slow_case_);
+ __ bind(&exit_);
+}
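+
+// Fast case above in pseudo-C (sketch):
+//
+//   if (!code->IsSmi() || untag(code) > String::kMaxAsciiCharCode) goto slow;
+//   result = single_character_string_cache[untag(code)];
+//   if (result == undefined_value) goto slow;  // cache miss -> runtime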
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, r0);
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing, a jump to the label not_found is performed. This
+ // jump does not guarantee that the string is not in the symbol table. If
+ // the string is found, the code falls through with the string in register
+ // r0. The contents of both the c1 and c2 registers are modified. On exit
+ // c1 is guaranteed to contain a halfword with its low and high bytes equal
+ // to the initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ Label done;
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (!ascii) {
+ __ add(count, count, Operand(count), SetCC);
+ } else {
+ __ cmp(count, Operand(0, RelocInfo::NONE));
+ }
+ __ b(eq, &done);
+
+ __ bind(&loop);
+ __ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ // Perform the sub between the load and the dependent store to give the
+ // load time to complete.
+ __ sub(count, count, Operand(1), SetCC);
+ __ strb(scratch, MemOperand(dest, 1, PostIndex));
+ // Loop back while count > 0; gt fails after the last iteration.
+ __ b(gt, &loop);
+
+ __ bind(&done);
+}
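+
+// The loop above is a plain byte copy (sketch); for two-byte input the count
+// is doubled first so the same byte loop applies:
+//
+//   while (count-- > 0) *dest++ = *src++;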
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
+ bool ascii = (flags & COPY_ASCII) != 0;
+ bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+ if (dest_always_aligned && FLAG_debug_code) {
+ // Check that destination is actually word aligned if the flag says
+ // that it is.
+ __ tst(dest, Operand(kPointerAlignmentMask));
+ __ Check(eq, "Destination of copy not aligned.");
+ }
+
+ const int kReadAlignment = 4;
+ const int kReadAlignmentMask = kReadAlignment - 1;
+ // Ensure that reading an entire aligned word containing the last character
+ // of a string will not read outside the allocated area (because we pad up
+ // to kObjectAlignment).
+ STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+ if (!ascii) {
+ __ add(count, count, Operand(count), SetCC);
+ } else {
+ __ cmp(count, Operand(0, RelocInfo::NONE));
+ }
+ __ b(eq, &done);
+
+ // Assume that you cannot read (or write) unaligned.
+ Label byte_loop;
+ // Must copy at least eight bytes, otherwise just do it one byte at a time.
+ __ cmp(count, Operand(8));
+ __ add(count, dest, Operand(count));
+ Register limit = count; // Read until src equals this.
+ __ b(lt, &byte_loop);
+
+ if (!dest_always_aligned) {
+ // Align dest by byte copying. Copies between zero and three bytes.
+ __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
+ Label dest_aligned;
+ __ b(eq, &dest_aligned);
+ __ cmp(scratch4, Operand(2));
+ __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
+ __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
+ __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
+ __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
+ __ bind(&dest_aligned);
+ }
+
+ Label simple_loop;
+
+ __ sub(scratch4, dest, Operand(src));
+ __ and_(scratch4, scratch4, Operand(0x03), SetCC);
+ __ b(eq, &simple_loop);
+ // Shift register is number of bits in a source word that
+ // must be combined with bits in the next source word in order
+ // to create a destination word.
+
+ // Complex loop for src/dst that are not aligned the same way.
+ {
+ Label loop;
+ __ mov(scratch4, Operand(scratch4, LSL, 3));
+ Register left_shift = scratch4;
+ __ and_(src, src, Operand(~3)); // Round down to load previous word.
+ __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+ // Store the "shift" most significant bits of scratch in the least
+ // signficant bits (i.e., shift down by (32-shift)).
+ __ rsb(scratch2, left_shift, Operand(32));
+ Register right_shift = scratch2;
+ __ mov(scratch1, Operand(scratch1, LSR, right_shift));
+
+ __ bind(&loop);
+ __ ldr(scratch3, MemOperand(src, 4, PostIndex));
+ __ sub(scratch5, limit, Operand(dest));
+ __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
+ __ str(scratch1, MemOperand(dest, 4, PostIndex));
+ __ mov(scratch1, Operand(scratch3, LSR, right_shift));
+ // Loop if four or more bytes left to copy.
+ // Compare to eight, because we did the subtract before increasing dst.
+ __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ b(ge, &loop);
+ }
+ // There is now between zero and three bytes left to copy (negative that
+ // number is in scratch5), and between one and three bytes already read into
+ // scratch1 (eight times that number in scratch4). We may have read past
+ // the end of the string, but because objects are aligned, we have not read
+ // past the end of the object.
+ // Find the minimum of remaining characters to move and preloaded characters
+ // and write those as bytes.
+ __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ b(eq, &done);
+ __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+ // Move minimum of bytes read and bytes left to copy to scratch5.
+ __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
+ // Between one and three (value in scratch5) characters already read into
+ // scratch1, ready to write.
+ __ cmp(scratch5, Operand(2));
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
+ __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
+ // Copy any remaining bytes.
+ __ b(&byte_loop);
+
+ // Simple loop.
+ // Copy words from src to dst, until less than four bytes left.
+ // Both src and dest are word aligned.
+ __ bind(&simple_loop);
+ {
+ Label loop;
+ __ bind(&loop);
+ __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+ __ sub(scratch3, limit, Operand(dest));
+ __ str(scratch1, MemOperand(dest, 4, PostIndex));
+ // Compare to 8, not 4, because we do the subtraction before increasing
+ // dest.
+ __ cmp(scratch3, Operand(8));
+ __ b(ge, &loop);
+ }
+
+ // Copy bytes from src to dst until dst hits limit.
+ __ bind(&byte_loop);
+ __ cmp(dest, Operand(limit));
+ __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+ __ b(ge, &done);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ b(&byte_loop);
+
+ __ bind(&done);
+}
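+
+// Core of the unaligned case above (sketch, little endian): with dest word
+// aligned and src offset by shift/8 bytes, each destination word combines
+// two consecutive aligned source words:
+//
+//   w0 = *aligned_src++ >> right_shift;       // bits carried from last word
+//   while (at least 4 bytes left) {
+//     w1 = *aligned_src++;
+//     *dest++ = w0 | (w1 << left_shift);
+//     w0 = w1 >> right_shift;
+//   }
+//   // the remaining 1..3 bytes are written one at a time.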
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ sub(scratch, c1, Operand(static_cast<int>('0')));
+ __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+ __ b(hi, &not_array_index);
+ __ sub(scratch, c2, Operand(static_cast<int>('0')));
+ __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+
+ // If the check failed, combine both characters into a single halfword.
+ // This is required by the contract of the method: code at the not_found
+ // branch expects this combination in the c1 register.
+ __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
+ __ b(ls, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ StringHelper::GenerateHashInit(masm, hash, c1);
+ StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+ StringHelper::GenerateHashGetHash(masm, hash);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ // Load the undefined value.
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ mov(mask, Operand(mask, ASR, 1));
+ __ sub(mask, mask, Operand(1));
+
+ // Calculate untagged address of the first element of the symbol table.
+ Register first_symbol_table_element = symbol_table;
+ __ add(first_symbol_table_element, symbol_table,
+ Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // mask: capacity mask
+ // first_symbol_table_element: address of the first element of
+ // the symbol table
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ Register candidate = scratch5; // Scratch register contains candidate.
+
+ // Calculate entry in symbol table.
+ if (i > 0) {
+ __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ } else {
+ __ mov(candidate, hash);
+ }
+
+ __ and_(candidate, candidate, Operand(mask));
+
+ // Load the entry from the symbol table.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ ldr(candidate,
+ MemOperand(first_symbol_table_element,
+ candidate,
+ LSL,
+ kPointerSizeLog2));
+
+ // If the entry is undefined, no string with this hash can be found.
+ __ cmp(candidate, undefined);
+ __ b(eq, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(2)));
+ __ b(ne, &next_probe[i]);
+
+ // Check that the candidate is a non-external ascii string.
+ __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
+ &next_probe[i]);
+
+ // Check if the two characters match.
+ // Assumes that word load is little endian.
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ cmp(chars, scratch);
+ __ b(eq, &found_in_symbol_table);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ __ Move(r0, result);
+}
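+
+// The probe sequence above as pseudo-C (sketch; kProbes = 4):
+//
+//   for (int i = 0; i < kProbes; i++) {
+//     entry = (hash + probe_offset(i)) & capacity_mask;  // offset(0) == 0
+//     candidate = table[entry];
+//     if (candidate == undefined_value) goto not_found;
+//     if (candidate is a sequential ascii string of length 2
+//         whose characters equal c1 and c2) return candidate;  // in r0
+//   }
+//   goto not_found;  // no guarantee the string is absent, see above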
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash = character + (character << 10);
+ __ add(hash, character, Operand(character, LSL, 10));
+ // hash ^= hash >> 6;
+ __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash += character;
+ __ add(hash, hash, Operand(character));
+ // hash += hash << 10;
+ __ add(hash, hash, Operand(hash, LSL, 10));
+ // hash ^= hash >> 6;
+ __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ // hash += hash << 3;
+ __ add(hash, hash, Operand(hash, LSL, 3));
+ // hash ^= hash >> 11;
+ __ eor(hash, hash, Operand(hash, ASR, 11));
+ // hash += hash << 15;
+ __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+
+ // if (hash == 0) hash = 27;
+ __ mov(hash, Operand(27), LeaveCC, nz);
+}
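+
+// Taken together, the three hash helpers above compute a Jenkins-style
+// one-at-a-time hash (sketch, 32 bit arithmetic):
+//
+//   hash = c0 + (c0 << 10); hash ^= hash >> 6;         // GenerateHashInit
+//   for each further character c:                       // ...AddCharacter
+//     hash += c; hash += hash << 10; hash ^= hash >> 6;
+//   hash += hash << 3; hash ^= hash >> 11;              // ...GetHash
+//   hash += hash << 15;
+//   if (hash == 0) hash = 27;  // the hash must never be zero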
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ // Check bounds and smi-ness.
+ __ ldr(r7, MemOperand(sp, kToOffset));
+ __ ldr(r6, MemOperand(sp, kFromOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ // I.e., arithmetic shift right by one un-smi-tags.
+ __ mov(r2, Operand(r7, ASR, 1), SetCC);
+ __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
+ // If either r7 or r6 had the smi tag bit set, then carry is set now.
+ __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ __ b(mi, &runtime); // From is negative.
+
+ __ sub(r2, r2, Operand(r3), SetCC);
+ __ b(mi, &runtime); // Fail if from > to.
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked up in the symbol table.
+ __ cmp(r2, Operand(2));
+ __ b(lt, &runtime);
+
+ // r2: length
+ // r3: from index (untagged smi)
+ // r6: from (smi)
+ // r7: to (smi)
+
+ // Make sure first argument is a sequential (or flat) string.
+ __ ldr(r5, MemOperand(sp, kStringOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r5, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ Condition is_string = masm->IsObjectStringType(r5, r1);
+ __ b(NegateCondition(is_string), &runtime);
+
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged smi)
+ // r5: string
+ // r6: from (smi)
+ // r7: to (smi)
+ Label seq_string;
+ __ and_(r4, r1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ __ cmp(r4, Operand(kConsStringTag));
+ __ b(gt, &runtime); // External strings go to runtime.
+ __ b(lt, &seq_string); // Sequential strings are handled directly.
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
+ __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ tst(r1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ b(ne, &runtime); // Cons and External strings go to runtime.
+
+ // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // r1: instance type.
+ // r2: length
+ // r3: from index (untagged smi)
+ // r5: string
+ // r6: from (smi)
+ // r7: to (smi)
+ __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+ __ cmp(r4, Operand(r7));
+ __ b(lt, &runtime); // Fail if to > length.
+
+ // r1: instance type.
+ // r2: result string length.
+ // r3: from index (untagged smi)
+ // r5: string.
+ // r6: from offset (smi)
+ // Check for flat ascii string.
+ Label non_ascii_flat;
+ __ tst(r1, Operand(kStringEncodingMask));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ b(eq, &non_ascii_flat);
+
+ Label result_longer_than_two;
+ __ cmp(r2, Operand(2));
+ __ b(gt, &result_longer_than_two);
+
+ // Sub string of length 2 requested.
+ // Get the two characters forming the sub string.
+ __ add(r5, r5, Operand(r3));
+ __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
+ __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to look up the two character string in the symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // r2: result string length.
+ // r3: two characters combined into halfword in little endian byte order.
+ __ bind(&make_two_character_string);
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
+ __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&result_longer_than_two);
+
+ // Allocate the result.
+ __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+
+ // r0: result string.
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // Locate first character of result.
+ __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r6, ASR, 1));
+
+ // r0: result string.
+ // r1: first character of result string.
+ // r2: result string length.
+ // r5: first character of sub string to copy.
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_flat);
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // The string is a sequential (flat) two byte string here.
+
+ // Allocate the result.
+ __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+ // r0: result string.
+ // r2: result string length.
+ // r5: string.
+ // Locate first character of result.
+ __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // As "from" is a smi it is 2 times the value which matches the size of a two
+ // byte character.
+ __ add(r5, r5, Operand(r6));
+
+ // r0: result string.
+ // r1: first character of result.
+ // r2: result length.
+ // r5: first character of string to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
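+
+// Decision tree implemented above (sketch):
+//
+//   if (from/to not smis || from > to || to > length) -> runtime
+//   n = to - from;
+//   if (n < 2)           -> runtime (single characters use the runtime cache)
+//   if (n == 2 && ascii) -> symbol table probe, else allocate 2-char string
+//   else                 -> allocate a sequential string and copy n chars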
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label compare_lengths;
+ // Find minimum length and length difference.
+ __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+ Register length_delta = scratch3;
+ __ mov(scratch1, scratch2, LeaveCC, gt);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(min_length, Operand(min_length));
+ __ b(eq, &compare_lengths);
+
+ // Untag smi.
+ __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
+
+ // Setup registers so that we only need to increment one register
+ // in the loop.
+ __ add(scratch2, min_length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, Operand(scratch2));
+ __ add(right, right, Operand(scratch2));
+ // Registers left and right point to the min_length character of the strings.
+ __ rsb(min_length, min_length, Operand(-1));
+ Register index = min_length;
+ // Index starts at -min_length.
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ add(index, index, Operand(1), SetCC);
+ __ ldrb(scratch2, MemOperand(left, index), ne);
+ __ ldrb(scratch4, MemOperand(right, index), ne);
+ // Skip to compare lengths with eq condition true.
+ __ b(eq, &compare_lengths);
+ __ cmp(scratch2, scratch4);
+ __ b(eq, &loop);
+ // Fallthrough with eq condition false.
+ }
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use zero length_delta as result.
+ __ mov(r0, Operand(length_delta), SetCC, eq);
+ // Fall through to here if characters compare not-equal.
+ __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+ __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+ __ Ret();
+}
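+
+// The comparison above in pseudo-C (sketch; results are smi-tagged):
+//
+//   min = Min(left->length(), right->length());
+//   for (i = 0; i < min; i++) {
+//     if (left[i] != right[i])
+//       return left[i] < right[i] ? LESS : GREATER;
+//   }
+//   // All shared characters equal: order by the length difference.
+//   delta = left->length() - right->length();
+//   return delta < 0 ? LESS : (delta > 0 ? GREATER : EQUAL);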
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[4]: left string
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(r0, r1);
+ __ b(ne, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+ // Compare flat ascii strings natively. Remove arguments from stack first.
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+ // Stack on entry:
+ // sp[0]: second argument.
+ // sp[4]: first argument.
+
+ // Load the two arguments.
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+ // Load instance types.
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kStringTag == 0);
+ // If either is not a string, go to runtime.
+ __ tst(r4, Operand(kIsNotStringMask));
+ __ tst(r5, Operand(kIsNotStringMask), eq);
+ __ b(ne, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // r0: first string
+ // r1: second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ {
+ Label strings_not_empty;
+ // Check if either of the strings is empty. In that case return the other.
+ __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
+ __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
+ STATIC_ASSERT(kSmiTag == 0);
+ // Else test if second string is empty.
+ __ cmp(r3, Operand(Smi::FromInt(0)), ne);
+ __ b(ne, &strings_not_empty); // If either string was empty, return r0.
+
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&strings_not_empty);
+ }
+
+ __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+ __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+ // Both strings are non-empty.
+ // r0: first string
+ // r1: second string
+ // r2: length of first string
+ // r3: length of second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ // Adding two lengths can't overflow.
+ STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+ __ add(r6, r2, Operand(r3));
+ // Handle the special case of adding two one character strings inline, using
+ // the symbol table to avoid allocating duplicate two character strings.
+ __ cmp(r6, Operand(2));
+ __ b(ne, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ &string_add_runtime);
+
+ // Get the two characters forming the new string.
+ __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+
+ // Try to look up the two character string in the symbol table. If it is
+ // not found, just allocate a new one.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&make_two_character_string);
+ // The resulting string has length 2 and the first characters of the two
+ // strings are combined into a single halfword in register r2. We can
+ // therefore fill the resulting string with a single halfword store
+ // instruction (which assumes that the processor is in little endian mode).
+ __ mov(r6, Operand(2));
+ __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
+ __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ cmp(r6, Operand(String::kMinNonFlatLength));
+ __ b(lt, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ ASSERT(IsPowerOf2(String::kMaxLength + 1));
+ // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
+ __ cmp(r6, Operand(String::kMaxLength + 1));
+ __ b(hs, &string_add_runtime);
+
+ // If result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ascii the result is an ascii cons string.
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ Label non_ascii, allocated, ascii_data;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ tst(r4, Operand(kStringEncodingMask));
+ __ tst(r5, Operand(kStringEncodingMask), ne);
+ __ b(eq, &non_ascii);
+
+ // Allocate an ASCII cons string.
+ __ bind(&ascii_data);
+ __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // r4: first instance type.
+ // r5: second instance type.
+ __ tst(r4, Operand(kAsciiDataHintMask));
+ __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
+
+ // Allocate a two byte cons string.
+ __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
+ // r0: first string
+ // r1: second string
+ // r2: length of first string
+ // r3: length of second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ // r6: sum of lengths.
+ __ bind(&string_add_flat_result);
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ // Check that both strings are sequential.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r4, Operand(kStringRepresentationMask));
+ __ tst(r5, Operand(kStringRepresentationMask), eq);
+ __ b(ne, &string_add_runtime);
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ eor(r7, r4, Operand(r5));
+ __ tst(r7, Operand(kStringEncodingMask));
+ __ b(ne, &string_add_runtime);
+ // And see if it's ASCII or two-byte.
+ __ tst(r4, Operand(kStringEncodingMask));
+ __ b(eq, &non_ascii_string_add_flat_result);
+
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ // r6: length of resulting flat string
+ __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r0: first character of first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: first character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+ // Load second argument and locate first character.
+ __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_string_add_flat_result);
+ // Both strings are sequential two byte strings.
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: sum of length of strings.
+ __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r7: result string.
+
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r0: first character of first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: first character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+ // Locate first character of second argument.
+ __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result (after copy of first string).
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+}
+
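The stub above picks one of several strategies based only on the two
lengths. A hedged restatement in plain C++ (names and threshold values are
illustrative; the real constants live on v8::internal::String):

#include <cassert>

enum class AddStrategy { kEmptyShortcut, kSymbolProbe, kFlatCopy, kCons,
                         kRuntime };

static AddStrategy Choose(int len1, int len2, int min_non_flat,
                          int max_length) {
  if (len1 == 0 || len2 == 0) return AddStrategy::kEmptyShortcut;
  int sum = len1 + len2;
  if (sum == 2) return AddStrategy::kSymbolProbe;   // two one-char strings
  if (sum < min_non_flat) return AddStrategy::kFlatCopy;
  if (sum > max_length) return AddStrategy::kRuntime;
  return AddStrategy::kCons;                        // lazy concatenation
}

int main() {
  const int kMinNonFlat = 13, kMax = (1 << 30) - 1;  // illustrative values
  assert(Choose(1, 1, kMinNonFlat, kMax) == AddStrategy::kSymbolProbe);
  assert(Choose(3, 4, kMinNonFlat, kMax) == AddStrategy::kFlatCopy);
  assert(Choose(100, 200, kMinNonFlat, kMax) == AddStrategy::kCons);
  return 0;
}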
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
new file mode 100644
index 0000000000..2e07e3b5c7
--- /dev/null
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -0,0 +1,491 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CODE_STUBS_ARM_H_
+#define V8_ARM_CODE_STUBS_ARM_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ static const int kUnknownIntValue = -1;
+
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ Register lhs,
+ Register rhs,
+ int constant_rhs = kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ lhs_(lhs),
+ rhs_(rhs),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) { }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ lhs_(LhsRegister(RegisterBits::decode(key))),
+ rhs_(RhsRegister(RegisterBits::decode(key))),
+ constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+ runtime_operands_type_(type_info),
+ name_(NULL) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ Register lhs_;
+ Register rhs_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+ char* name_;
+
+ static const int kMaxKnownRhs = 0x40000000;
+ static const int kKnownRhsKeyBits = 6;
+
+ // Minor key encoding in 17 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class TypeInfoBits: public BitField<int, 8, 2> {};
+ class RegisterBits: public BitField<bool, 10, 1> {};
+ class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+ // Encode the parameters in a unique 17 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt())
+ | TypeInfoBits::encode(runtime_operands_type_)
+ | RegisterBits::encode(lhs_.is(r0));
+ }
+
+ void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+ void HandleBinaryOpSlowCases(MacroAssembler* masm,
+ Label* not_smi,
+ Register lhs,
+ Register rhs,
+ const Builtins::JavaScript& builtin);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
+ return key;
+ }
+
+ int KnownBitsForMinorKey(int key) {
+ if (!key) return 0;
+ if (key <= 11) return key - 1;
+ int d = 1;
+ while (key != 12) {
+ key--;
+ d <<= 1;
+ }
+ return d;
+ }
+
+ Register LhsRegister(bool lhs_is_r0) {
+ return lhs_is_r0 ? r0 : r1;
+ }
+
+ Register RhsRegister(bool lhs_is_r0) {
+ return lhs_is_r0 ? r1 : r0;
+ }
+
+ bool ShouldGenerateSmiCode() {
+ return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
+#endif
+};
+
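The minor-key plumbing above is easier to see in isolation. Below is a
self-contained sketch (an approximation, not V8's actual BitField) of how
the stub packs fields into the 17-bit key, including the known-int
sub-encoding and its inverse:

#include <cassert>
#include <cstdint>

// Minimal stand-in for the BitField helper: a field owns bit positions
// [shift, shift + size) of a 32-bit key.
template <class T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << size));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

// Known-int sub-key: constants 2..10 map to keys 3..11; powers of two map
// to 12 + log2(constant); 0 means "not specialized".
static int KeyForKnownInt(int constant_rhs) {
  if (constant_rhs <= 10) return constant_rhs + 1;
  int key = 12;
  while ((constant_rhs & 1) == 0) { key++; constant_rhs >>= 1; }
  return key;
}

static int KnownIntForKey(int key) {
  if (key == 0) return 0;
  if (key <= 11) return key - 1;
  int d = 1;
  while (key != 12) { key--; d <<= 1; }
  return d;
}

int main() {
  using KnownIntBits = BitField<int, 11, 6>;  // mirrors the layout above
  for (int c : {2, 3, 10, 16, 1024}) {
    uint32_t key = KnownIntBits::encode(KeyForKnownInt(c));
    assert(KnownIntForKey(KnownIntBits::decode(key)) == c);  // round trip
  }
  return 0;
}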
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compares two flat ASCII strings and returns the result in r0.
+ // Does not use the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+// This stub can do a fast mod operation without using fp.
+// It is tail called from the GenericBinaryOpStub and it always
+// returns an answer. It never causes GC so it doesn't need a real frame.
+//
+// The inputs are always positive Smis. This is never called
+// where the denominator is a power of 2. We handle that separately.
+//
+// If we consider the denominator as an odd number multiplied by a power of 2,
+// then:
+// * The exponent (power of 2) is in the shift_distance register.
+// * The odd number is in the odd_number register. It is always in the range
+// of 3 to 25.
+// * The bits from the numerator that are to be copied to the answer (there are
+// shift_distance of them) are in the mask_bits register.
+// * The other bits of the numerator have been shifted down and are in the lhs
+// register.
+class IntegerModStub : public CodeStub {
+ public:
+ IntegerModStub(Register result,
+ Register shift_distance,
+ Register odd_number,
+ Register mask_bits,
+ Register lhs,
+ Register scratch)
+ : result_(result),
+ shift_distance_(shift_distance),
+ odd_number_(odd_number),
+ mask_bits_(mask_bits),
+ lhs_(lhs),
+ scratch_(scratch) {
+ // We don't code these in the minor key, so they should always be the same.
+ // We don't really want to fix that since this stub is rather large and we
+ // don't want many copies of it.
+ ASSERT(shift_distance_.is(r9));
+ ASSERT(odd_number_.is(r4));
+ ASSERT(mask_bits_.is(r3));
+ ASSERT(scratch_.is(r5));
+ }
+
+ private:
+ Register result_;
+ Register shift_distance_;
+ Register odd_number_;
+ Register mask_bits_;
+ Register lhs_;
+ Register scratch_;
+
+ // Minor key encoding in 8 bits.
+ class ResultRegisterBits: public BitField<int, 0, 4> {};
+ class LhsRegisterBits: public BitField<int, 4, 4> {};
+
+ Major MajorKey() { return IntegerMod; }
+ int MinorKey() {
+ // Encode the parameters in a unique 8 bit value.
+ return ResultRegisterBits::encode(result_.code())
+ | LhsRegisterBits::encode(lhs_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "IntegerModStub"; }
+
+ // Utility functions.
+ void DigitSum(MacroAssembler* masm,
+ Register lhs,
+ int mask,
+ int shift,
+ Label* entry);
+ void DigitSum(MacroAssembler* masm,
+ Register lhs,
+ Register scratch,
+ int mask,
+ int shift1,
+ int shift2,
+ Label* entry);
+ void ModGetInRangeBySubtraction(MacroAssembler* masm,
+ Register lhs,
+ int shift,
+ int rhs);
+ void ModReduce(MacroAssembler* masm,
+ Register lhs,
+ int max,
+ int denominator);
+ void ModAnswer(MacroAssembler* masm,
+ Register result,
+ Register shift_distance,
+ Register mask_bits,
+ Register sum_of_digits);
+
+
+#ifdef DEBUG
+ void Print() { PrintF("IntegerModStub\n"); }
+#endif
+};
+
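The decomposition in the comment above rests on a simple identity: for a
non-negative numerator n and a denominator odd * 2^shift,
n mod (odd * 2^shift) == (((n >> shift) mod odd) << shift) | (n & (2^shift - 1)).
A brute-force check in portable C++ (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

static uint32_t ModByDecomposition(uint32_t numerator, uint32_t odd,
                                   int shift) {
  uint32_t mask_bits = numerator & ((1u << shift) - 1);  // copied verbatim
  uint32_t lhs = numerator >> shift;                     // remaining bits
  return ((lhs % odd) << shift) | mask_bits;
}

int main() {
  for (uint32_t n = 0; n < 100000; n++) {
    assert(ModByDecomposition(n, 3, 2) == n % 12);    // 12 == 3 * 2^2
    assert(ModByDecomposition(n, 25, 3) == n % 200);  // 200 == 25 * 2^3
  }
  return 0;
}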
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Register the_int,
+ Register the_heap_number,
+ Register scratch)
+ : the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch) { }
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+
+ // Minor key encoding in 12 bits.
+ class IntRegisterBits: public BitField<int, 0, 4> {};
+ class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+ class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+ Major MajorKey() { return WriteInt32ToHeapNumber; }
+ int MinorKey() {
+ // Encode the parameters in a unique 12 bit value.
+ return IntRegisterBits::encode(the_int_.code())
+ | HeapNumberRegisterBits::encode(the_heap_number_.code())
+ | ScratchRegisterBits::encode(scratch_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
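What the stub computes when VFP is unavailable can be reproduced in a few
lines of portable C++, assuming IEEE-754 doubles (a sketch, not the stub's
actual code): build the sign, the biased exponent and the 52-bit mantissa
for a non-zero int32 by hand and compare with the hardware conversion.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int32_t value = -123456789;  // any non-zero int32
  uint64_t sign = value < 0 ? 1 : 0;
  uint64_t mag =
      static_cast<uint64_t>(value < 0 ? -static_cast<int64_t>(value) : value);
  int msb = 0;  // position of the highest set bit
  for (uint64_t m = mag; m > 1; m >>= 1) msb++;
  uint64_t bits = (sign << 63) |
                  (static_cast<uint64_t>(1023 + msb) << 52) |
                  ((mag << (52 - msb)) & ((uint64_t{1} << 52) - 1));
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  assert(d == static_cast<double>(value));  // matches the FPU's answer
  return 0;
}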
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number
+ // in the object register is found in the cache, the generated code falls
+ // through with the result in the result register. The object and the
+ // result register can be the same. If the number is not found in the
+ // cache, the code jumps to the label not_found with the content of the
+ // object register unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+};
+
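The cache the lookup above probes can be pictured as a small direct-mapped
table keyed by the number's bits. A hedged sketch (hypothetical layout and
size; V8's real cache lives in the heap and also handles doubles):

#include <array>
#include <cassert>
#include <cstddef>
#include <optional>
#include <string>
#include <utility>

struct NumberStringCache {
  static constexpr std::size_t kSize = 64;  // hypothetical, power of two
  std::array<std::pair<int, std::string>, kSize> entries{};

  std::optional<std::string> Lookup(int smi) const {
    const auto& e = entries[static_cast<std::size_t>(smi) & (kSize - 1)];
    if (e.second.empty() || e.first != smi) return std::nullopt;  // miss
    return e.second;                                              // hit
  }
  void Insert(int smi, std::string s) {
    entries[static_cast<std::size_t>(smi) & (kSize - 1)] = {smi, std::move(s)};
  }
};

int main() {
  NumberStringCache cache;
  cache.Insert(42, "42");
  assert(cache.Lookup(42).has_value());  // fall through with the result
  assert(!cache.Lookup(7).has_value());  // jump to not_found
  return 0;
}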
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register offset, Register scratch)
+ : object_(object), offset_(offset), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register offset_;
+ Register scratch_;
+
+ // Minor key encoding in 12 bits. 4 bits for each of the three
+ // registers (object, offset and scratch) OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class OffsetBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ OffsetBits::encode(offset_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
+ " (scratch reg %d)\n",
+ object_.code(), offset_.code(), scratch_.code());
+ }
+#endif
+};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index df17b6f864..f985fb4ba1 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -49,27 +50,6 @@ namespace v8 {
namespace internal {
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc,
- bool never_nan_nan);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-static void MultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int);
-static bool IsEasyToMultiplyBy(int x);
-
-
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
@@ -790,7 +770,7 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
ToBooleanStub stub(tos);
frame_->CallStub(&stub, 0);
// Convert the result in "tos" to a condition code.
- __ cmp(tos, Operand(0));
+ __ cmp(tos, Operand(0, RelocInfo::NONE));
} else {
// Implements slow case by calling the runtime.
frame_->EmitPush(tos);
@@ -937,16 +917,55 @@ class DeferredInlineSmiOperation: public DeferredCode {
}
virtual void Generate();
+ // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+ // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are
+ // empty methods, so it is the responsibility of the deferred code to save
+ // and restore registers.
+ virtual bool AutoSaveAndRestore() { return false; }
+
+ void JumpToNonSmiInput(Condition cond);
+ void JumpToAnswerOutOfRange(Condition cond);
private:
+ void GenerateNonSmiInput();
+ void GenerateAnswerOutOfRange();
+ void WriteNonSmiAnswer(Register answer,
+ Register heap_number,
+ Register scratch);
+
Token::Value op_;
int value_;
bool reversed_;
OverwriteMode overwrite_mode_;
Register tos_register_;
+ Label non_smi_input_;
+ Label answer_out_of_range_;
};
+// For bit operations we try harder and handle the case where the input is
+// not a Smi but a 32-bit integer, without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
+ ASSERT(Token::IsBitOp(op_));
+
+ __ b(cond, &non_smi_input_);
+}
+
+
+// For bit operations the result is always 32 bits, so we handle the case
+// where the result does not fit in a Smi without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
+ ASSERT(Token::IsBitOp(op_));
+
+ if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
+ // >>> requires an unsigned-to-double conversion and the non-VFP code
+ // does not support this conversion.
+ __ b(cond, entry_label());
+ } else {
+ __ b(cond, &answer_out_of_range_);
+ }
+}
+
// On entry the non-constant side of the binary operation is in tos_register_
// and the constant smi side is nowhere. The tos_register_ is not used by the
@@ -1025,6 +1044,172 @@ void DeferredInlineSmiOperation::Generate() {
// came into this function with, so we can merge back to that frame
// without trashing it.
copied_frame.MergeTo(frame_state()->frame());
+
+ Exit();
+
+ if (non_smi_input_.is_linked()) {
+ GenerateNonSmiInput();
+ }
+
+ if (answer_out_of_range_.is_linked()) {
+ GenerateAnswerOutOfRange();
+ }
+}
+
+
+// Convert and write the integer answer into heap_number.
+void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
+ Register heap_number,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, answer);
+ if (op_ == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(scratch, heap_number, Operand(kHeapObjectTag));
+ __ vstr(d0, scratch, HeapNumber::kValueOffset);
+ } else {
+ WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
+ __ CallStub(&stub);
+ }
+}
+
+
+void DeferredInlineSmiOperation::GenerateNonSmiInput() {
+ // We know the left hand side is not a Smi and the right hand side is an
+ // immediate value (value_) which can be represented as a Smi. We only
+ // handle bit operations.
+ ASSERT(Token::IsBitOp(op_));
+
+ if (FLAG_debug_code) {
+ __ Abort("Should not fall through!");
+ }
+
+ __ bind(&non_smi_input_);
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(tos_register_);
+ }
+
+ // This routine uses the registers from r2 to r6. At the moment they are
+ // not used by the register allocator, but when they are it should use
+ // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+ Register heap_number_map = r7;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
+ __ cmp(r3, heap_number_map);
+ // Not a number, fall back to the GenericBinaryOpStub.
+ __ b(ne, entry_label());
+
+ Register int32 = r2;
+ // Not a signed 32-bit int, fall back to the GenericBinaryOpStub.
+ __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+
+ // tos_register_ (r0 or r1): Original heap number.
+ // int32: signed 32-bit int.
+
+ Label result_not_a_smi;
+ int shift_value = value_ & 0x1f;
+ switch (op_) {
+ case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
+ case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
+ case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
+ case Token::SAR:
+ ASSERT(!reversed_);
+ if (shift_value != 0) {
+ __ mov(int32, Operand(int32, ASR, shift_value));
+ }
+ break;
+ case Token::SHR:
+ ASSERT(!reversed_);
+ if (shift_value != 0) {
+ __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
+ } else {
+ // SHR is special because it is required to produce a positive answer.
+ __ cmp(int32, Operand(0, RelocInfo::NONE));
+ }
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi, &result_not_a_smi);
+ } else {
+ // Non-VFP code cannot convert from unsigned to double, so fall back
+ // to GenericBinaryOpStub.
+ __ b(mi, entry_label());
+ }
+ break;
+ case Token::SHL:
+ ASSERT(!reversed_);
+ if (shift_value != 0) {
+ __ mov(int32, Operand(int32, LSL, shift_value));
+ }
+ break;
+ default: UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a smi. Not necessary for AND,
+ // nor for SAR if the shift is more than 0, nor for SHR if the shift is
+ // more than 1.
+ if (!( (op_ == Token::AND) ||
+ ((op_ == Token::SAR) && (shift_value > 0)) ||
+ ((op_ == Token::SHR) && (shift_value > 1)))) {
+ __ add(r3, int32, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ }
+ __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
+ Exit();
+
+ if (result_not_a_smi.is_linked()) {
+ __ bind(&result_not_a_smi);
+ if (overwrite_mode_ != OVERWRITE_LEFT) {
+ ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
+ (overwrite_mode_ == OVERWRITE_RIGHT));
+ // If the allocation fails, fall back to the GenericBinaryOpStub.
+ __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
+ // Nothing can go wrong now, so overwrite tos.
+ __ mov(tos_register_, Operand(r4));
+ }
+
+ // int32: answer as a signed 32-bit integer.
+ // tos_register_: Heap number to write the answer into.
+ WriteNonSmiAnswer(int32, tos_register_, r3);
+
+ Exit();
+ }
+}
+
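The add-of-0x40000000 above is the standard Smi range check: a 32-bit
value can be tagged iff it lies in [-2^30, 2^30 - 1], and adding 2^30
leaves the sign bit clear for exactly that range. A portable restatement
(illustrative C++):

#include <cassert>
#include <cstdint>

static bool FitsInSmi(int32_t v) {
  // Unsigned add avoids signed-overflow UB; the cast back recovers the
  // sign bit that the SetCC/mi pair tests.
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

int main() {
  assert(FitsInSmi(0));
  assert(FitsInSmi((1 << 30) - 1) && FitsInSmi(-(1 << 30)));
  assert(!FitsInSmi(1 << 30) && !FitsInSmi(-(1 << 30) - 1));
  return 0;
}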
+
+void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
+ // The inputs from a bitwise operation were Smis but the result cannot fit
+ // into a Smi, so we store it into a heap number. tos_register_ holds the
+ // result to be converted.
+ ASSERT(Token::IsBitOp(op_));
+ ASSERT(!reversed_);
+
+ if (FLAG_debug_code) {
+ __ Abort("Should not fall through!");
+ }
+
+ __ bind(&answer_out_of_range_);
+ if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
+ // >>> 0 is a special case where the result is already tagged but wrong
+ // because the Smi is negative. We untag it.
+ __ mov(tos_register_, Operand(tos_register_, ASR, kSmiTagSize));
+ }
+
+ // This routine uses the registers from r2 to r6. At the moment they are
+ // not used by the register allocator, but when they are it should use
+ // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+ // Allocate the result heap number.
+ Register heap_number_map = r7;
+ Register heap_number = r4;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ // If the allocation fails, fall back to the GenericBinaryOpStub.
+ __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
+ WriteNonSmiAnswer(tos_register_, heap_number, r3);
+ __ mov(tos_register_, Operand(heap_number));
+
+ Exit();
}
@@ -1049,6 +1234,43 @@ static int BitPosition(unsigned x) {
}
+// Can we multiply by x with at most two shifts and an add?
+// This answers yes to all integers from 2 to 10.
+static bool IsEasyToMultiplyBy(int x) {
+ if (x < 2) return false; // Avoid special cases.
+ if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
+ if (IsPowerOf2(x)) return true; // Simple shift.
+ if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
+ if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
+ return false;
+}
+
+
+// Can multiply by anything that IsEasyToMultiplyBy returns true for.
+// Source and destination may be the same register. This routine does
+// not set carry and overflow the way a mul instruction would.
+static void InlineMultiplyByKnownInt(MacroAssembler* masm,
+ Register source,
+ Register destination,
+ int known_int) {
+ if (IsPowerOf2(known_int)) {
+ masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
+ } else if (PopCountLessThanEqual2(known_int)) {
+ int first_bit = BitPosition(known_int);
+ int second_bit = BitPosition(known_int ^ (1 << first_bit));
+ masm->add(destination, source,
+ Operand(source, LSL, second_bit - first_bit));
+ if (first_bit != 0) {
+ masm->mov(destination, Operand(destination, LSL, first_bit));
+ }
+ } else {
+ ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
+ int the_bit = BitPosition(known_int + 1);
+ masm->rsb(destination, source, Operand(source, LSL, the_bit));
+ }
+}
+
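The same strength reduction, restated in portable C++ (illustrative; the
helper stands in for BitPosition, and the two-bit test plays the role of
PopCountLessThanEqual2):

#include <cassert>

static int BitPos(unsigned x) {  // position of the lowest set bit
  int n = 0;
  while ((x & 1) == 0) { x >>= 1; n++; }
  return n;
}

static int MultiplyByKnownInt(int source, int known_int) {
  unsigned k = static_cast<unsigned>(known_int);
  if ((k & (k - 1)) == 0) {                     // power of two: one shift
    return source << BitPos(k);
  }
  unsigned rest = k & (k - 1);                  // clear the lowest bit
  if ((rest & (rest - 1)) == 0) {               // exactly two bits set
    int first_bit = BitPos(k);
    int second_bit = BitPos(k ^ (1u << first_bit));
    return (source + (source << (second_bit - first_bit))) << first_bit;
  }
  // Patterns like 0b111...1, where known_int + 1 is a power of two.
  return (source << BitPos(k + 1)) - source;    // the rsb at the end
}

int main() {
  for (int k = 2; k <= 10; k++) {               // IsEasyToMultiplyBy range
    for (int s = 0; s < 1000; s++) assert(MultiplyByKnownInt(s, k) == s * k);
  }
  return 0;
}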
+
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
@@ -1118,7 +1340,8 @@ void CodeGenerator::SmiOperation(Token::Value op,
frame_->EmitPush(lhs, TypeInfo::Smi());
TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
frame_->EmitPush(rhs, t);
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
+ GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
+ GenericBinaryOpStub::kUnknownIntValue);
}
return;
}
@@ -1173,10 +1396,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
}
frame_->EmitPush(tos, TypeInfo::Smi());
} else {
- DeferredCode* deferred =
+ DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
__ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ deferred->JumpToNonSmiInput(ne);
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
@@ -1222,92 +1445,87 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
- TypeInfo result =
- (op == Token::SAR) ? TypeInfo::Integer32() : TypeInfo::Number();
- if (!reversed) {
- if (op == Token::SHR) {
- if (int_value >= 2) {
- result = TypeInfo::Smi();
- } else if (int_value >= 1) {
- result = TypeInfo::Integer32();
- }
+ int shift_value = int_value & 0x1f;
+ TypeInfo result = TypeInfo::Number();
+
+ if (op == Token::SHR) {
+ if (shift_value > 1) {
+ result = TypeInfo::Smi();
+ } else if (shift_value > 0) {
+ result = TypeInfo::Integer32();
+ }
+ } else if (op == Token::SAR) {
+ if (shift_value > 0) {
+ result = TypeInfo::Smi();
} else {
- if (int_value >= 1) {
- result = TypeInfo::Smi();
- }
+ result = TypeInfo::Integer32();
}
+ } else {
+ ASSERT(op == Token::SHL);
+ result = TypeInfo::Integer32();
}
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- int shift_value = int_value & 0x1f; // least significant 5 bits
- DeferredCode* deferred =
+
+ DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
- uint32_t problematic_mask = kSmiTagMask;
- // For unsigned shift by zero all negative smis are problematic.
- bool skip_smi_test = both_sides_are_smi;
- if (shift_value == 0 && op == Token::SHR) {
- problematic_mask |= 0x80000000;
- skip_smi_test = false;
- }
- if (!skip_smi_test) {
- __ tst(tos, Operand(problematic_mask));
- deferred->Branch(ne); // Go slow for problematic input.
+ if (!both_sides_are_smi) {
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->JumpToNonSmiInput(ne);
}
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
+ Register scratch = VirtualFrame::scratch0();
int adjusted_shift = shift_value - kSmiTagSize;
ASSERT(adjusted_shift >= 0);
+
if (adjusted_shift != 0) {
- __ mov(scratch, Operand(tos, LSL, adjusted_shift));
- // Check that the *signed* result fits in a smi.
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- deferred->Branch(mi);
- __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
- } else {
- // Check that the *signed* result fits in a smi.
- __ add(scratch2, tos, Operand(0x40000000), SetCC);
- deferred->Branch(mi);
- __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+ __ mov(tos, Operand(tos, LSL, adjusted_shift));
}
+ // Check that the *signed* result fits in a smi.
+ __ add(scratch, tos, Operand(0x40000000), SetCC);
+ deferred->JumpToAnswerOutOfRange(mi);
+ __ mov(tos, Operand(tos, LSL, kSmiTagSize));
}
break;
}
case Token::SHR: {
if (shift_value != 0) {
+ Register scratch = VirtualFrame::scratch0();
__ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag.
- // LSR by immediate 0 means shifting 32 bits.
- __ mov(scratch, Operand(scratch, LSR, shift_value));
+ __ mov(tos, Operand(scratch, LSR, shift_value));
if (shift_value == 1) {
- // check that the *unsigned* result fits in a smi
- // neither of the two high-order bits can be set:
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when
- // smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
- __ tst(scratch, Operand(0xc0000000));
- deferred->Branch(ne);
- } else {
- ASSERT(shift_value >= 2);
- result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi.
+ // - 0x40000000: this number would convert to negative when Smi
+ // tagging.
+ // These two cases can only happen with shifts by 0 or 1 when
+ // handed a valid smi.
+ __ tst(tos, Operand(0xc0000000));
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ // If the unsigned result does not fit in a Smi, we require an
+ // unsigned to double conversion. Without VFP V8 has to fall
+ // back to the runtime. The deferred code will expect tos
+ // to hold the original Smi to be shifted.
+ __ mov(tos, Operand(scratch, LSL, kSmiTagSize), LeaveCC, ne);
+ }
+ deferred->JumpToAnswerOutOfRange(ne);
}
- __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
+ __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+ } else {
+ __ cmp(tos, Operand(0, RelocInfo::NONE));
+ deferred->JumpToAnswerOutOfRange(mi);
}
break;
}
case Token::SAR: {
- // In the ARM instructions set, ASR by immediate 0 means shifting 32
- // bits.
if (shift_value != 0) {
- // Do the shift and the tag removal in one operation. If the shift
+ // Do the shift and the tag removal in one operation. If the shift
// is 31 bits (the highest possible value) then we emit the
- // instruction as a shift by 0 which means shift arithmetically by
- // 32.
+ // instruction as a shift by 0 which in the ARM ISA means shift
+ // arithmetically by 32.
__ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
- // Put tag back.
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
- // SAR by at least 1 gives a Smi.
- result = TypeInfo::Smi();
}
break;
}
@@ -1354,7 +1572,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
// brevity to comprehensiveness.
__ tst(tos, Operand(mask));
deferred->Branch(ne);
- MultiplyByKnownInt(masm_, tos, tos, int_value);
+ InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1435,7 +1653,7 @@ void CodeGenerator::Comparison(Condition cc,
// We call with 0 args because there are 0 on the stack.
CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
frame_->CallStub(&stub, 0);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump();
smi.Bind();
@@ -1556,7 +1774,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset));
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+ __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r1, Operand(apply_code));
__ b(ne, &build_args);
@@ -1598,7 +1817,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// frame.
Label loop;
// r3 is a small non-negative integer, due to the test above.
- __ cmp(r3, Operand(0));
+ __ cmp(r3, Operand(0, RelocInfo::NONE));
__ b(eq, &invoke);
// Compute the address of the first argument.
__ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
@@ -1756,7 +1975,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
- frame_->EmitPush(Operand(0));
+ frame_->EmitPush(Operand(0, RelocInfo::NONE));
}
frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
@@ -3445,12 +3664,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
frame_->EmitPush(Operand(node->constant_elements()));
int length = node->values()->length();
- if (node->depth() > 1) {
+ if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ frame_->CallStub(&stub, 3);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ } else if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
frame_->CallStub(&stub, 3);
}
frame_->EmitPush(r0); // save the result
@@ -3521,9 +3746,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(),
literal->handle(),
@@ -3621,9 +3844,7 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(),
literal->handle(),
@@ -3737,9 +3958,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(),
literal->handle(),
@@ -4167,11 +4386,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -4180,21 +4398,21 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
Load(args->at(i));
}
+ // Spill everything from here to simplify the implementation.
VirtualFrame::SpilledScope spilled_scope(frame_);
- // r0: the number of arguments.
+ // Load the argument count into r0 and the function into r1 as per
+ // calling convention.
__ mov(r0, Operand(arg_count));
- // Load the function into r1 as per calling convention.
- __ ldr(r1, frame_->ElementAt(arg_count + 1));
+ __ ldr(r1, frame_->ElementAt(arg_count));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
+ frame_->EmitPush(r0);
- // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
- __ str(r0, frame_->Top());
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -4394,7 +4612,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
// Get the absolute untagged value of the exponent and use that for the
// calculation.
__ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
- __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi); // Negate if negative.
+ // Negate if negative.
+ __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
__ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
// Run through all the bits in the exponent. The result is calculated in d0
@@ -4407,14 +4626,14 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ b(ne, &more_bits);
// If exponent is positive we are done.
- __ cmp(exponent, Operand(0));
+ __ cmp(exponent, Operand(0, RelocInfo::NONE));
__ b(ge, &allocate_return);
// If exponent is negative result is 1/result (d2 already holds 1.0 in that
// case). However if d0 has reached infinity this will not provide the
// correct result, so call runtime if that is the case.
__ mov(scratch2, Operand(0x7FF00000));
- __ mov(scratch1, Operand(0));
+ __ mov(scratch1, Operand(0, RelocInfo::NONE));
__ vmov(d1, scratch1, scratch2); // Load infinity into d1.
__ vcmp(d0, d1);
__ vmrs(pc);
@@ -4917,7 +5136,7 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
__ jmp(exit_label());
__ bind(&false_result);
// Set false result.
- __ mov(map_result_, Operand(0));
+ __ mov(map_result_, Operand(0, RelocInfo::NONE));
}
private:
@@ -5091,7 +5310,7 @@ void CodeGenerator::GenerateRandomHeapNumber(
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand(0, RelocInfo::NONE));
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
@@ -5258,6 +5477,73 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT_EQ(1, args->length());
+
+ Load(args->at(0));
+ frame_->PopToR0();
+ {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
+ Label done;
+ Label call_runtime;
+ __ BranchOnSmi(r0, &done);
+
+ // Load JSRegExp map into r1. Check that argument object has this map.
+ // The argument to this function should be the result of calling RegExp
+ // exec, which is either an unmodified JSRegExpResult or null. Anything
+ // not having the unmodified JSRegExpResult map is returned unmodified.
+ // This also ensures that elements are fast.
+
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
+ __ ldr(r1, ContextOperand(r1, Context::REGEXP_RESULT_MAP_INDEX));
+ __ ldr(ip, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(ne, &done);
+
+ if (FLAG_debug_code) {
+ __ LoadRoot(r2, Heap::kEmptyFixedArrayRootIndex);
+ __ ldr(ip, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ cmp(ip, r2);
+ __ Check(eq, "JSRegExpResult: default map but non-empty properties.");
+ }
+
+ // All set, copy the contents to a new object.
+ __ AllocateInNewSpace(JSRegExpResult::kSize,
+ r2,
+ r3,
+ r4,
+ &call_runtime,
+ NO_ALLOCATION_FLAGS);
+ // Store RegExpResult map as map of allocated object.
+ ASSERT(JSRegExpResult::kSize == 6 * kPointerSize);
+ // Copy all fields (map is already in r1) from (untagged) r0 to r2.
+ // Change map of elements array (ends up in r4) to be a FixedCOWArray.
+ __ bic(r0, r0, Operand(kHeapObjectTagMask));
+ __ ldm(ib, r0, r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
+ __ stm(ia, r2,
+ r1.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
+ ASSERT(JSRegExp::kElementsOffset == 2 * kPointerSize);
+ // Check whether the elements array is the empty fixed array, and
+ // otherwise make it copy-on-write (it should never be empty unless
+ // someone is messing with the arguments to the runtime function).
+ __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+ __ add(r0, r2, Operand(kHeapObjectTag)); // Tag result and move it to r0.
+ __ cmp(r4, ip);
+ __ b(eq, &done);
+ __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ __ str(ip, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ b(&done);
+ __ bind(&call_runtime);
+ __ push(r0);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ __ bind(&done);
+ }
+ frame_->EmitPush(r0);
+}
+
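The map switch above is what makes the clone cheap: both results share one
elements array until somebody writes to it. A conceptual sketch of
copy-on-write sharing (plain C++; V8's FixedCOWArray works at the map
level, not with shared_ptr):

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

struct CowArray {
  std::shared_ptr<std::vector<int>> elems;

  CowArray Clone() const { return {elems}; }  // O(1): share the storage
  void Set(std::size_t i, int v) {
    if (elems.use_count() > 1) {              // another clone can see us
      elems = std::make_shared<std::vector<int>>(*elems);
    }
    (*elems)[i] = v;
  }
};

int main() {
  CowArray a{std::make_shared<std::vector<int>>(3, 7)};
  CowArray b = a.Clone();
  b.Set(0, 1);  // the private copy happens here
  assert((*a.elems)[0] == 7 && (*b.elems)[0] == 1);
  return 0;
}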
+
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
@@ -5402,7 +5688,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(nz);
- // Check the object's elements are in fast case.
+ // Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
__ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
@@ -5547,6 +5833,27 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register value = frame_->PopToRegister();
+ Register tmp = frame_->scratch0();
+ __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
+ __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
+ cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register value = frame_->PopToRegister();
+
+ __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
+ __ IndexFromHash(value, value);
+ frame_->EmitPush(value);
+}
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
@@ -5660,9 +5967,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); // r0 has result
} else {
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
@@ -5986,12 +6291,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Literal* rliteral = node->right()->AsLiteral();
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
- bool overwrite_left =
- (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
- bool overwrite_right =
- (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_left = node->left()->ResultOverwriteAllowed();
+ bool overwrite_right = node->right()->ResultOverwriteAllowed();
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
@@ -6060,47 +6361,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Expression* right = node->right();
Token::Value op = node->op();
- // To make null checks efficient, we check if either left or right is the
- // literal 'null'. If so, we optimize the code by inlining a null check
- // instead of calling the (very) general runtime routine for checking
- // equality.
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- bool left_is_null =
- left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
- bool right_is_null =
- right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
- // The 'null' value can only be equal to 'null' or 'undefined'.
- if (left_is_null || right_is_null) {
- Load(left_is_null ? right : left);
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
-
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- if (op != Token::EQ_STRICT) {
- true_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, Operand(ip));
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- }
-
- cc_reg_ = eq;
- ASSERT(has_cc() && frame_->height() == original_height);
- return;
- }
- }
-
// To make typeof testing for natives implemented in JavaScript really
// efficient, we generate special code for expressions of the form:
// 'typeof <expression> == <string>'.
@@ -6261,6 +6521,40 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Register tos = frame_->PopToRegister();
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(tos, ip);
+
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ if (!node->is_strict()) {
+ true_target()->Branch(eq);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(tos, Operand(ip));
+ true_target()->Branch(eq);
+
+ __ tst(tos, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ // It can be an undetectable object.
+ __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
+ __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
+ __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
+ __ cmp(tos, Operand(1 << Map::kIsUndetectable));
+ }
+
+ cc_reg_ = eq;
+ ASSERT(has_cc() && frame_->height() == original_height);
+}
+
+
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
explicit DeferredReferenceGetNamedValue(Register receiver,
@@ -6694,15 +6988,9 @@ void CodeGenerator::EmitKeyedLoad() {
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
+ // Get the elements array from the receiver.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch2, ip);
- __ Assert(eq, "JSObject with fast elements map has slow elements");
- }
+ __ AssertFastElements(scratch1);
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
@@ -6991,1890 +7279,6 @@ void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ Push(cp, r3);
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Setup the object header.
- __ LoadRoot(r2, Heap::kContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // Setup the fixed slots.
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
- // Copy the global object from the surrounding context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
- __ b(eq, &slow_case);
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- &slow_case,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and set up the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
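-// For example, the Smi 5 is 1.25 * 2^2, so it converts to the exponent word
-// 0x40140000 (sign 0, biased exponent 0x401, top mantissa bits 0x40000) and
-// the mantissa word 0x00000000: together the bits of the IEEE double 5.0.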
-class ConvertToDoubleStub : public CodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
- Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
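- // (With bias 1023 and shift 20 this constant is 0x3ff00000, the top word
- // of the IEEE double 1.0.)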
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0));
- __ Ret();
-
- __ bind(&not_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent. This test
- // has the neat side effect of setting the flags according to the sign.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
- __ b(eq, &max_negative_int);
- // Set up the correct exponent in scratch_. All non-Smi int32s have the
- // same exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30
- // (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
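- // (With bias 1023 and shift 20 this is (1023 + 30) << 20 = 0x41d00000.)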
- __ mov(scratch_, Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
- // Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
- // We should be masking the implicit first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
-}
-
-
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc,
- bool never_nan_nan) {
- Label not_identical;
- Label heap_number, return_equal;
- __ cmp(r0, r1);
- __ b(ne, &not_identical);
-
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cc != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are equal and they are not both Smis, so neither of them is a Smi.
- // If it's not a heap number, then return equal.
- if (cc == lt || cc == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == le || cc == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cc == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
- }
-
- __ bind(&return_equal);
- if (cc == lt) {
- __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cc == gt) {
- __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- if (cc != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
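- // For example, the canonical quiet NaN 0x7ff8000000000000 has exponent
- // 0x7ff and a non-zero mantissa, while 0x7ff0000000000000 is +Infinity.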
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cc == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- __ Ret();
- }
- // No fall through here.
- }
-
- __ bind(&not_identical);
-}
-
-
-// See comment at call site.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- Label rhs_is_smi;
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
-
- // Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal.
- // If rhs is r0 then there is already a non-zero value in it.
- if (!rhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
-
- // We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
- __ jmp(lhs_not_nan);
-
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
- // succeed. Return non-equal.
- // If lhs is r0 then there is already a non-zero value in it.
- if (!lhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6.
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- // Fall through to both_loaded_as_doubles.
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0));
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0));
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cc == lt || cc == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cc == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the lhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the rhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
- __ CallCFunction(ExternalReference::compare_doubles(), 4);
- __ pop(pc); // Return.
- }
-}
-
-
-// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // If either operand is a JSObject or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- // Get the type of the first operand into r2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &first_non_object);
-
- // Return non-zero (r0 is not zero)
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret();
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r2, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r3, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(ne, &return_not_equal);
-}
-
-
-// See comment at call site.
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers,
- Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
- __ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ cmp(r2, r3);
- __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
- __ jmp(both_loaded_as_doubles);
-}
-
-
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // r2 is object type of rhs.
- // Ensure that no non-strings have the symbol bit set.
- Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, &object_test);
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
- __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
-
- // Both are symbols. We already checked they weren't the same pointer
- // so they are not equal.
- __ mov(r0, Operand(NOT_EQUAL));
- __ Ret();
-
- __ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ and_(r0, r2, Operand(r3));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
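- // For example, with 32 cache entries the mask is 31, so the double 1.5
- // (upper word 0x3ff80000, lower word 0x00000000) hashes to
- // (0x3ff80000 ^ 0x00000000) & 31 = 0, i.e. the first entry.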
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ BranchOnSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- true);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ BranchOnSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ vcmp(d0, d1);
- __ vmrs(pc);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
- }
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native,
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to look up the number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ add(offset_, object_, Operand(offset_));
- __ RecordWriteHelper(object_, offset_, scratch_);
- __ Ret();
-}
-
-
-// On entry lhs_ and rhs_ are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
-// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, lhs_not_nan;
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to lhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP3 is supported the double values of the numbers have
- // been loaded into d7 and d6. Otherwise, the double values have been loaded
- // into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
-
- __ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7, if
- // VFP3 is supported, or in r0, r1, r2, and r3.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ vcmp(d7, d6);
- __ vmrs(pc); // Move vector status bits to normal status bits.
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
-
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc_ == lt || cc_ == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
- }
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
- }
-
- Label check_for_symbols;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of rhs_. Never falls through.
- EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
- &both_loaded_as_doubles,
- &check_for_symbols,
- &flat_string_check);
-
- __ bind(&check_for_symbols);
- // In the strict case, EmitStrictTwoHeapObjectCompare already took care of
- // symbols.
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
-
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
- r2,
- r3,
- r4,
- r5);
- // Never falls through to here.
-
- __ bind(&slow);
-
- __ Push(lhs_, rhs_);
- // Figure out which native to call and set up the arguments.
- Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
- ncr = LESS;
- }
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_JS);
-}
-
-
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
-// The stub returns zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result;
- Label not_heap_number;
- Register scratch0 = VirtualFrame::scratch0();
-
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch0, ip);
- __ b(&not_heap_number, ne);
-
- __ sub(ip, tos_, Operand(kHeapObjectTag));
- __ vldr(d1, ip, HeapNumber::kValueOffset);
- __ vcmp(d1, 0.0);
- __ vmrs(pc);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN
- __ Ret();
-
- __ bind(&not_heap_number);
-
- // Check if the value is 'null'.
- // 'null' => false.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos_, ip);
- __ b(&false_result, eq);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch0, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
- __ Ret();
-
- // Return 0 in "tos_" for false .
- __ bind(&false_result);
- __ mov(tos_, Operand(0));
- __ Ret();
-}
-
-
-// We fall into this code if the operands were Smis, but the result was
-// not (e.g. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in r0 and r1. In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
- MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
- Label slow, slow_reverse, do_the_call;
- bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
-
- ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
- Register heap_number_map = r6;
-
- if (ShouldGenerateSmiCode()) {
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Smi-smi case (overflow).
- // Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r3 and r7 are scratch.
- __ AllocateHeapNumber(
- r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
- // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
- // using registers d7 and d6 for the double values.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r9);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r9);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- __ jmp(&do_the_call); // Tail call. No return.
- }
-
- // We branch here if at least one of r0 and r1 is not a Smi.
- __ bind(not_smi);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // After this point we have the left hand side in r1 and the right hand side
- // in r0.
- if (lhs.is(r0)) {
- __ Swap(r0, r1, ip);
- }
-
- // The type transition also calculates the answer.
- bool generate_code_to_calculate_answer = true;
-
- if (ShouldGenerateFPCode()) {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm); // Tail call.
- generate_code_to_calculate_answer = false;
- break;
-
- default:
- break;
- }
- }
-
- if (generate_code_to_calculate_answer) {
- Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
- if (mode_ == NO_OVERWRITE) {
- // In the case where there is no chance of an overwritable float we may
- // as well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
- }
-
- // Move r0 to a double in r2-r3.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(r5, Operand(r0)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r0);
- __ bind(&r0_is_smi);
- if (mode_ == OVERWRITE_RIGHT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- }
- } else {
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r4);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
- // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
- Label r1_is_not_smi;
- if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &r1_is_not_smi);
- GenerateTypeTransition(masm); // Tail call.
- }
-
- __ bind(&finished_loading_r0);
-
- // Move r1 to a double in r0-r1.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
- __ bind(&r1_is_not_smi);
- __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(r5, Operand(r1)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r1);
- __ bind(&r1_is_smi);
- if (mode_ == OVERWRITE_LEFT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r9);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- __ bind(&finished_loading_r1);
- }
-
- if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
- __ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
-
- if (Token::MUL == op_) {
- __ vmul(d5, d6, d7);
- } else if (Token::DIV == op_) {
- __ vdiv(d5, d6, d7);
- } else if (Token::ADD == op_) {
- __ vadd(d5, d6, d7);
- } else if (Token::SUB == op_) {
- __ vsub(d5, d6, d7);
- } else {
- UNREACHABLE();
- }
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ mov(pc, lr);
- } else {
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to subtract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
- __ mov(r0, Operand(r5));
- // And we are done.
- __ pop(pc);
- }
- }
- }
-
- if (!generate_code_to_calculate_answer &&
- !slow_reverse.is_linked() &&
- !slow.is_linked()) {
- return;
- }
-
- if (lhs.is(r0)) {
- __ b(&slow);
- __ bind(&slow_reverse);
- __ Swap(r0, r1, ip);
- }
-
- heap_number_map = no_reg; // Don't use this any more from here on.
-
- // We jump to here if something goes wrong (one param is not a number of any
- // sort or new-space allocation fails).
- __ bind(&slow);
-
- // Push arguments to the stack
- __ Push(r1, r0);
-
- if (Token::ADD == op_) {
- // Test for string arguments before calling runtime.
- // r1 : first argument
- // r0 : second argument
- // sp[0] : second argument
- // sp[4] : first argument
-
- Label not_strings, not_string1, string1, string1_smi2;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &not_string1);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string1);
-
- // First argument is a string, test second.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &string1_smi2);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to look up the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, r0, r2, r4, r5, r6, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ str(r2, MemOperand(sp, 0));
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &not_strings);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
- __ bind(&not_strings);
- }
-
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Fastest for doubles that are in the ranges
-// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
-// almost to the range of signed int32 values that are not Smis. Jumps to the
-// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
-// (excluding the endpoints).
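-// For example, 0x40000000 (2^30) is stored with the biased exponent
-// 1023 + 30 = 0x41d, which is the case this code handles fastest.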
-static void GetInt32(MacroAssembler* masm,
- Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- Label* slow) {
- Label right_exponent, done;
- // Get exponent word.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- __ mov(dest, Operand(0));
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- __ sub(scratch2, scratch2, Operand(fudge_factor));
- __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- __ b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ b(gt, slow);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- __ b(lt, &done);
- if (!CpuFeatures::IsSupported(VFP3)) {
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- __ rsb(dest, scratch2, Operand(30));
- }
- __ bind(&right_exponent);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions implementing double precision to integer
- // conversion using round to zero.
- __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- __ vmov(d7, scratch2, scratch);
- __ vcvt_s32_f64(s15, d7);
- __ vmov(dest, s15);
- } else {
- // Get the top bits of the mantissa.
- __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit, which takes care of one bit,
- // and we want to leave the sign bit 0, so we subtract 2 from the shift
- // distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- __ mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- __ rsb(dest, dest, Operand(0), LeaveCC, ne);
- }
- __ bind(&done);
-}
-
-// For bitwise ops where the inputs are not both Smis, we try here to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs. On exit the answer is in r0.
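-// For example, in JS (-1 >>> 0) is 0xffffffff, which is not a valid Smi, so
-// that answer has to be written into a heap number.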
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label slow, result_not_a_smi;
- Label rhs_is_smi, lhs_is_smi;
- Label done_checking_rhs, done_checking_lhs;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- GetInt32(masm, lhs, r3, r5, r4, &slow);
- __ jmp(&done_checking_lhs);
- __ bind(&lhs_is_smi);
- __ mov(r3, Operand(lhs, ASR, 1));
- __ bind(&done_checking_lhs);
-
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- GetInt32(masm, rhs, r2, r5, r4, &slow);
- __ jmp(&done_checking_rhs);
- __ bind(&rhs_is_smi);
- __ mov(r2, Operand(rhs, ASR, 1));
- __ bind(&done_checking_rhs);
-
- ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
- // r0 and r1: Original operands (Smi or heap numbers).
- // r2 and r3: Signed int32 operands.
- switch (op_) {
- case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
- case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
- case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of writing
- // the register as an unsigned int, so we go to the slow case if we hit
- // this situation.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, &slow);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default: UNREACHABLE();
- }
- // Check that the *signed* result fits in a Smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- __ Ret();
-
- Label have_to_allocate, got_a_heap_number;
- __ bind(&result_not_a_smi);
- switch (mode_) {
- case OVERWRITE_RIGHT: {
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(rhs));
- break;
- }
- case OVERWRITE_LEFT: {
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(lhs));
- break;
- }
- case NO_OVERWRITE: {
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
- default: break;
- }
- __ bind(&got_a_heap_number);
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- if (mode_ != NO_OVERWRITE) {
- __ bind(&have_to_allocate);
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- __ jmp(&got_a_heap_number);
- }
-
- // If all else failed then we go to the runtime system.
- __ bind(&slow);
- __ Push(lhs, rhs); // Restore stack.
- switch (op_) {
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-// Can we multiply by x with at most two shifts and an add?
-// This answers yes for all integers from 2 to 10.
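-// For example, x * 10 is (x + (x << 2)) << 1 and x * 7 is (x << 3) - x.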
-static bool IsEasyToMultiplyBy(int x) {
- if (x < 2) return false; // Avoid special cases.
- if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
- if (IsPowerOf2(x)) return true; // Simple shift.
- if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
- if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
- return false;
-}
-
-
-// Can multiply by anything that IsEasyToMultiplyBy returns true for.
-// Source and destination may be the same register. This routine does
-// not set carry and overflow the way a mul instruction would.
-static void MultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int) {
- if (IsPowerOf2(known_int)) {
- __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
- } else if (PopCountLessThanEqual2(known_int)) {
- int first_bit = BitPosition(known_int);
- int second_bit = BitPosition(known_int ^ (1 << first_bit));
- __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
- if (first_bit != 0) {
- __ mov(destination, Operand(destination, LSL, first_bit));
- }
- } else {
- ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
- int the_bit = BitPosition(known_int + 1);
- __ rsb(destination, source, Operand(source, LSL, the_bit));
- }
-}
-
-
-// This function (as opposed to MultiplyByKnownInt) takes the known int in
-// a register for the cases where it doesn't know a good trick, and may deliver
-// a result that needs shifting.
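-// For example, for known_int 6 it computes result = source * 3 and reports a
-// required shift of 2: one bit for the remaining factor of two and one for
-// the Smi tag.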
-static void MultiplyByKnownInt2(
- MacroAssembler* masm,
- Register result,
- Register source,
- Register known_int_register, // Smi tagged.
- int known_int,
- int* required_shift) { // Including Smi tag shift
- switch (known_int) {
- case 3:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 1;
- break;
- case 5:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 1;
- break;
- case 6:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 2;
- break;
- case 7:
- __ rsb(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 9:
- __ add(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 10:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 2;
- break;
- default:
- ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
- __ mul(result, source, known_int_register);
- *required_shift = 0;
- }
-}
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits in base (mask + 1) repeatedly until we have a
-// number from 0 to mask. On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
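-// For example, reducing 25 with mask 3 and shift 2 goes 25 -> 1 + 6 = 7 ->
-// 3 + 1 = 4 -> 0 + 1 = 1, which is 25 % 3; a multiple of 3 such as 6 ends at
-// exactly 3 (the mask), which is what the 'eq' flags report.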
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to hold the mask.
- Label loop;
- __ bind(&loop);
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to hold the mask.
- Label loop;
- __ bind(&loop);
- __ bic(scratch, lhs, Operand(mask));
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift1));
- __ add(lhs, lhs, Operand(scratch, LSR, shift2));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-// Splits the number into two halves (bottom half has shift bits). The top
-// half is subtracted from the bottom half. If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs) {
- int mask = (1 << shift) - 1;
- __ and_(ip, lhs, Operand(mask));
- __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
- __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator) {
- int limit = denominator;
- while (limit * 2 <= max) limit *= 2;
- while (limit >= denominator) {
- __ cmp(lhs, Operand(limit));
- __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
- limit >>= 1;
- }
-}
-
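ModReduce finishes the job when the digit sum leaves a value below max that may still be at or above the denominator: it conditionally subtracts denominator * 2^k for decreasing k, a restoring-division-style reduction. A direct C++ rendering (sketch):

#include <cstdint>

// Mirrors IntegerModStub::ModReduce: reduce n (already <= max) into
// [0, denominator) by conditionally subtracting halved multiples.
uint32_t ModReduceSketch(uint32_t n, uint32_t max, uint32_t denominator) {
  uint32_t limit = denominator;
  while (limit * 2 <= max) limit *= 2;
  while (limit >= denominator) {
    if (n >= limit) n -= limit;  // cmp + conditional sub in the stub.
    limit >>= 1;
  }
  return n;
}

// e.g. n = 60, max = 0x3f, denominator = 11: 60 - 44 = 16, 16 - 11 = 5,
// and 60 % 11 == 5.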
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits) {
- __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
- __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
- __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
- __ bic(odd_number_, odd_number_, Operand(1));
- __ mov(odd_number_, Operand(odd_number_, LSL, 1));
- // We now have (odd_number_ - 1) * 2 in the register.
- // Build a switch out of branches instead of data because it avoids
- // having to teach the assembler about intra-code-object pointers
- // that are not in relative branch instructions.
- Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
- Label mod21, mod23, mod25;
- { Assembler::BlockConstPoolScope block_const_pool(masm);
- __ add(pc, pc, Operand(odd_number_));
- // When you read pc it is always 8 ahead, but when you write it you always
- // write the actual value. So we put in two nops to take up the slack.
- __ nop();
- __ nop();
- __ b(&mod3);
- __ b(&mod5);
- __ b(&mod7);
- __ b(&mod9);
- __ b(&mod11);
- __ b(&mod13);
- __ b(&mod15);
- __ b(&mod17);
- __ b(&mod19);
- __ b(&mod21);
- __ b(&mod23);
- __ b(&mod25);
- }
-
- // For each denominator we find a multiple that is almost only ones
- // when expressed in binary. Then we do the sum-of-digits trick for
- // that number. If the multiple is not 1 then we have to do a little
- // more work afterwards to get the answer into the range 0 to
- // denominator-1.
- DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
- __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
- ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
- __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
- ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
- ModReduce(masm, lhs_, 0x3f, 11);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
- ModReduce(masm, lhs_, 0xff, 13);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
- __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
- ModReduce(masm, lhs_, 0xff, 19);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
- ModReduce(masm, lhs_, 0x3f, 21);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
- ModReduce(masm, lhs_, 0xff, 23);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
- ModReduce(masm, lhs_, 0x7f, 25);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
-
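Putting the pieces together: the stub's caller splits rhs into odd * 2^shift, parks lhs's low shift bits in mask_bits, and the branches above compute (lhs >> shift) mod odd; ModAnswer then reassembles the result. A sketch on untagged values (an assumption for clarity; the real stub carries the smi tag inside shift_distance):

#include <cstdint>

// Decomposition used by IntegerModStub for rhs = odd * 2^shift:
//   lhs mod rhs == (lhs & (2^shift - 1)) + (((lhs >> shift) mod odd) << shift)
uint32_t ModByOddTimesPowerOf2(uint32_t lhs, uint32_t odd, int shift) {
  uint32_t mask_bits = lhs & ((1u << shift) - 1);  // Low bits pass through.
  uint32_t sum = (lhs >> shift) % odd;             // Digit-sum machinery above.
  return mask_bits + (sum << shift);               // ModAnswer.
}

// e.g. 100 % 12 with 12 == 3 * 2^2:
//   mask_bits == 100 & 3 == 0, (100 >> 2) % 3 == 25 % 3 == 1,
//   result == 0 + (1 << 2) == 4 == 100 % 12.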
-
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
@@ -8899,2787 +7303,6 @@ const char* GenericBinaryOpStub::GetName() {
}
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- // lhs_ : x
- // rhs_ : y
- // r0 : result
-
- Register result = r0;
- Register lhs = lhs_;
- Register rhs = rhs_;
-
- // This code can't cope with other register allocations yet.
- ASSERT(result.is(r0) &&
- ((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0))));
-
- Register smi_test_reg = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
-
- // All ops need to know whether we are dealing with two Smis. Set up
- // smi_test_reg to tell us that.
- if (ShouldGenerateSmiCode()) {
- __ orr(smi_test_reg, lhs, Operand(rhs));
- }
-
- switch (op_) {
- case Token::ADD: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
- break;
- }
-
- case Token::SUB: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- if (lhs.is(r1)) {
- __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
- } else {
- __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
- }
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
- break;
- }
-
- case Token::MUL: {
- Label not_smi, slow;
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ b(ne, &not_smi);
- // Remove tag from one operand (but keep sign), so that result is Smi.
- __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
- // Do multiplication
- // scratch = lower 32 bits of ip * lhs.
- __ smull(scratch, scratch2, lhs, ip);
- // Go slow on overflows (overflow bit is not set).
- __ mov(ip, Operand(scratch, ASR, 31));
- // No overflow if higher 33 bits are identical.
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &slow);
- // Go slow on zero result to handle -0.
- __ tst(scratch, Operand(scratch));
- __ mov(result, Operand(scratch), LeaveCC, ne);
- __ Ret(ne);
- // The result was 0, so we may need to return -0: we know one of the
- // operands was zero, and a negative operand times zero must give -0.
- __ add(scratch2, rhs, Operand(lhs), SetCC);
- __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
- // Slow case. We fall through here if we multiplied a negative number
- // with 0, because that would mean we should produce -0.
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
- break;
- }
-
- case Token::DIV:
- case Token::MOD: {
- Label not_smi;
- if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
- Label lhs_is_unsuitable;
- __ BranchOnNotSmi(lhs, &not_smi);
- if (IsPowerOf2(constant_rhs_)) {
- if (op_ == Token::MOD) {
- __ and_(rhs,
- lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
- SetCC);
- // We now have the answer, but if the input was negative we also
- // have the sign bit. Our work is done if the result is
- // positive or zero:
- if (!rhs.is(r0)) {
- __ mov(r0, rhs, LeaveCC, pl);
- }
- __ Ret(pl);
- // A mod of a negative left hand side must return a negative number.
- // Unfortunately if the answer is 0 then we must return -0. And we
- // already optimistically trashed rhs so we may need to restore it.
- __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
- // Next two instructions are conditional on the answer being -0.
- __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
- __ b(eq, &lhs_is_unsuitable);
- // We need to subtract the dividend. Eg. -3 % 4 == -3.
- __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
- } else {
- ASSERT(op_ == Token::DIV);
- __ tst(lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
- __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
- int shift = 0;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- d >>= 1;
- shift++;
- }
- __ mov(r0, Operand(lhs, LSR, shift));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- } else {
- // Not a power of 2.
- __ tst(lhs, Operand(0x80000000u));
- __ b(ne, &lhs_is_unsuitable);
- // Find a fixed point reciprocal of the divisor so we can divide by
- // multiplying.
- double divisor = 1.0 / constant_rhs_;
- int shift = 32;
- double scale = 4294967296.0; // 1 << 32.
- uint32_t mul;
- // Maximise the precision of the fixed point reciprocal.
- while (true) {
- mul = static_cast<uint32_t>(scale * divisor);
- if (mul >= 0x7fffffff) break;
- scale *= 2.0;
- shift++;
- }
- mul++;
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ mov(scratch2, Operand(mul));
- __ umull(scratch, scratch2, scratch2, lhs);
- __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
- // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
- // rhs is still the known rhs. rhs is Smi tagged.
- // lhs is still the unknown lhs. lhs is Smi tagged.
- int required_scratch_shift = 0; // Including the Smi tag shift of 1.
- // scratch = scratch2 * rhs.
- MultiplyByKnownInt2(masm,
- scratch,
- scratch2,
- rhs,
- constant_rhs_,
- &required_scratch_shift);
- // scratch << required_scratch_shift is now the Smi tagged rhs *
- // (lhs / rhs) where / indicates integer division.
- if (op_ == Token::DIV) {
- __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
- __ b(ne, &lhs_is_unsuitable); // There was a remainder.
- __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
- } else {
- ASSERT(op_ == Token::MOD);
- __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
- }
- }
- __ Ret();
- __ bind(&lhs_is_unsuitable);
- } else if (op_ == Token::MOD &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS) {
- // Do generate a bit of smi code for modulus even though the default for
- // modulus is not to do it: since the ARM processor has no coprocessor
- // support for modulus, checking for smis makes sense. We can handle
- // 1 to 25 times any power of 2. This covers over half the numbers from
- // 1 to 100, including all of the first 25. (Actually the constants < 10
- // are handled above by reciprocal multiplication. We only get here for
- // those cases if the right hand side is not a constant, or for cases
- // like 192, which is 3*2^6 and ends up in the 3 case in the integer mod
- // stub.)
- Label slow;
- Label not_power_of_2;
- ASSERT(!ShouldGenerateSmiCode());
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- // Check for two positive smis.
- __ orr(smi_test_reg, lhs, Operand(rhs));
- __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &slow);
- // Check that rhs is a power of two and not zero.
- Register mask_bits = r3;
- __ sub(scratch, rhs, Operand(1), SetCC);
- __ b(mi, &slow);
- __ and_(mask_bits, rhs, Operand(scratch), SetCC);
- __ b(ne, &not_power_of_2);
- // Calculate power of two modulus.
- __ and_(result, lhs, Operand(scratch));
- __ Ret();
-
- __ bind(&not_power_of_2);
- __ eor(scratch, scratch, Operand(mask_bits));
- // At least two bits are set in the modulus. The high one(s) are in
- // mask_bits and the low one is scratch + 1.
- __ and_(mask_bits, scratch, Operand(lhs));
- Register shift_distance = scratch;
- scratch = no_reg;
-
- // The rhs consists of a power of 2 multiplied by some odd number.
- // The power-of-2 part we handle by putting the corresponding bits
- // from the lhs in the mask_bits register, and the power in the
- // shift_distance register. Shift distance is never 0 due to Smi
- // tagging.
- __ CountLeadingZeros(r4, shift_distance, shift_distance);
- __ rsb(shift_distance, r4, Operand(32));
-
- // Now we need to find out what the odd number is. The last bit is
- // always 1.
- Register odd_number = r4;
- __ mov(odd_number, Operand(rhs, LSR, shift_distance));
- __ cmp(odd_number, Operand(25));
- __ b(gt, &slow);
-
- IntegerModStub stub(
- result, shift_distance, odd_number, mask_bits, lhs, r5);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
-
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(
- masm,
- &not_smi,
- lhs,
- rhs,
- op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label slow;
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &slow);
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- switch (op_) {
- case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
- case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
- case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(result, Operand(lhs, ASR, scratch2));
- // Smi tag result.
- __ bic(result, result, Operand(kSmiTagMask));
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch, Operand(0xc0000000));
- __ b(ne, &slow);
- // Smi tag result.
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- __ b(mi, &slow);
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- default: UNREACHABLE();
- }
- __ Ret();
- __ bind(&slow);
- HandleNonSmiBitwiseOp(masm, lhs, rhs);
- break;
- }
-
- default: UNREACHABLE();
- }
- // This code should be unreachable.
- __ stop("Unreachable");
-
- // Generate an unreachable reference to the DEFAULT stub so that it can be
- // found at the end of this stub when clearing ICs at GC.
- // TODO(kaznacheev): Check performance impact and get rid of this.
- if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
- GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
- __ CallStub(&uninit);
- }
-}
-
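The ADD and SUB smi fast paths above operate on the tagged values optimistically: a smi stores 2 * value, so tagged addition overflows exactly when the untagged sum leaves the smi range; the stub adds with SetCC, returns on no overflow (vc) and reverts otherwise. A C++ sketch of the same idea, using the GCC/Clang __builtin_add_overflow intrinsic in place of reading the V flag:

#include <cstdint>

// Optimistic smi addition: both inputs are tagged (2 * value).
// Returns true and writes the tagged sum on success; on overflow the
// stub reverts the add and falls through to the slow path.
bool SmiAddOptimistic(int32_t lhs_smi, int32_t rhs_smi, int32_t* result) {
  return !__builtin_add_overflow(lhs_smi, rhs_smi, result);
}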
-
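For a constant, non-power-of-two rhs, the DIV/MOD path above divides by multiplying with a fixed-point reciprocal: it doubles the scale until mul uses at least 31 bits, then takes the high word of a 32x32 multiply. A self-contained sketch of the same search and quotient computation, on untagged values (the stub gets one extra shift for free from the smi tag):

#include <cstdint>

uint32_t DivideByConstantSketch(uint32_t lhs, int constant_rhs) {
  // Maximise the precision of the reciprocal, exactly as the stub does.
  double divisor = 1.0 / constant_rhs;
  int shift = 32;
  double scale = 4294967296.0;  // 1 << 32.
  uint32_t mul;
  while (true) {
    mul = static_cast<uint32_t>(scale * divisor);
    if (mul >= 0x7fffffff) break;
    scale *= 2.0;
    shift++;
  }
  mul++;
  // quotient = (lhs * mul) >> shift; umull supplies the >> 32 for free.
  return static_cast<uint32_t>(
      (static_cast<uint64_t>(lhs) * mul) >> shift);
}

The stub then multiplies the quotient back with MultiplyByKnownInt2: for DIV a mismatch means there was a remainder and it bails to the slow path, and for MOD the difference from lhs is the answer.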
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Argument is a number and is on stack and in r0.
- Label runtime_call;
- Label input_not_smi;
- Label loaded;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Load argument and check if it is a smi.
- __ BranchOnNotSmi(r0, &input_not_smi);
-
- CpuFeatures::Scope scope(VFP3);
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &runtime_call,
- true);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- __ mov(r0,
- Operand(ExternalReference::transcendental_cache_array_address()));
- // r0 points to cache array.
- __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(r0, Operand(0));
- __ b(eq, &runtime_call);
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of entry r1 in the cache, i.e., &r0[r1 * 12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(r0, r0, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ b(ne, &runtime_call);
- __ cmp(r3, r5);
- __ b(ne, &runtime_call);
- // Cache hit. Load result, pop argument and return.
- __ mov(r0, Operand(r6));
- __ pop();
- __ Ret();
- }
-
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-}
-
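The cache lookup hashes the raw 64-bit image of the double by XOR-folding its two words (the eor/ASR sequence above), and the DEBUG block pins the 12-byte element layout that the indexing arithmetic (r1 + r1 << 1, then << 2, i.e. r1 * 12) depends on. A sketch, assuming a power-of-two kCacheSize and little-endian word order as on ARM:

#include <cstdint>
#include <cstring>

// Element layout checked by the DEBUG block above: in[2] + output.
struct Element {
  uint32_t in[2];
  void* output;  // 12 bytes total on 32-bit ARM.
};

// Hash of a double's raw bits, mirroring the stub's shifts (which are
// arithmetic, hence the signed intermediate).
int TranscendentalHash(double value, int cache_size) {
  uint32_t words[2];
  std::memcpy(words, &value, sizeof(words));  // low word, then high word.
  int32_t h = static_cast<int32_t>(words[0] ^ words[1]);
  h ^= h >> 16;  // Arithmetic shifts, as in the stub.
  h ^= h >> 8;
  return h & (cache_size - 1);
}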
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Do tail-call to runtime routine. Runtime routines expect at least one
- // argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-
- __ StubReturn(1);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &try_float);
-
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- if (negative_zero_ == kStrictNegativeZero) {
- // If we have to check for zero, then we can check for the max negative
- // smi while we are at it.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, &slow);
- __ rsb(r0, r0, Operand(0));
- __ StubReturn(1);
- } else {
- // The value of the expression is a smi and 0 is OK for -0. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r0, r0, Operand(0), SetCC);
- __ StubReturn(1, vc);
- // We don't have to reverse the optimistic neg since the only case
- // where we fall through is the minimum negative Smi, which is the case
- // where the neg leaves the register unchanged.
- __ jmp(&slow); // Go slow on max negative Smi.
- }
-
- __ bind(&try_float);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
-
- // Convert the heap number in r0 to an untagged integer in r1.
- GetInt32(masm, r0, r1, r2, r3, &slow);
-
- // Do the bitwise operation (move negated) and check if the result
- // fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ b(&done);
-
- __ bind(&try_float);
- if (overwrite_ != UNARY_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite r0 until
- // we're sure we can do it without going through the slow case
- // that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ mov(r0, Operand(r2));
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- } else {
- UNIMPLEMENTED();
- }
-
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
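Note that the Token::SUB path never does floating-point arithmetic to negate: it XORs the IEEE-754 sign bit in the HeapNumber's exponent word (HeapNumber::kSignMask), which also handles -0 and NaN correctly. The same operation on a raw double (sketch):

#include <cstdint>
#include <cstring>

// Negate an IEEE-754 double by flipping its sign bit, as the
// Token::SUB path does on the HeapNumber's high (exponent) word.
double NegateBySignFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // HeapNumber::kSignMask on the high word.
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}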
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // r0 holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(sp, MemOperand(r3));
-
- // Restore the next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(r2);
- __ str(r2, MemOperand(r3));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- __ cmp(fp, Operand(0));
- // Set cp to NULL if fp is NULL.
- __ mov(cp, Operand(0), LeaveCC, eq);
- // Restore cp otherwise.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ pop(pc);
-}
-
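The STATIC_ASSERTs above pin the stack handler to four words, with next at offset 0, fp at offset 2 and pc at offset 3; the remaining word is the state read via kStateOffset in GenerateThrowUncatchable below. The implied layout as a C++ struct (field order inferred from the asserts, not copied from V8 headers):

struct StackHandler {
  StackHandler* next;  // kNextOffset == 0: pop(r2)/str(r2) relinks this.
  unsigned state;      // kStateOffset: e.g. StackHandler::ENTRY.
  void* fp;            // kFPOffset == 2 * kPointerSize.
  void* pc;            // kPCOffset == 3 * kPointerSize: pop(pc) resumes here.
};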
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ ldr(r2, MemOperand(sp, kStateOffset));
- __ cmp(r2, Operand(StackHandler::ENTRY));
- __ b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ ldr(sp, MemOperand(sp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to the next handler past the current ENTRY
- // handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(r2);
- __ str(r2, MemOperand(r3));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(r0, Operand(false));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r0, MemOperand(r2));
- }
-
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // fp
- // lr
-
- // Discard handler state (r2 is not used) and restore frame pointer.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- __ cmp(fp, Operand(0));
- // Set cp to NULL if fp is NULL.
- __ mov(cp, Operand(0), LeaveCC, eq);
- // Restore cp otherwise.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ pop(pc);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate,
- int frame_alignment_skew) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
-
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(1, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(), 1);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
-
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
-
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
-#if defined(V8_HOST_ARCH_ARM)
- if (FLAG_debug_code) {
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
- __ sub(r2, sp, Operand(frame_alignment_skew));
- __ tst(r2, Operand(frame_alignment_mask));
- __ b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort re-entering here.
- __ stop("Unexpected alignment");
- __ bind(&alignment_as_expected);
- }
- }
-#endif
-
- // Just before the call (jump) below lr is pushed, so the actual alignment is
- // adding one to the current skew.
- int alignment_before_call =
- (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
- if (alignment_before_call > 0) {
- // Push until the alignment before the call is met.
- __ mov(r2, Operand(0));
- for (int i = alignment_before_call;
- (i & frame_alignment_mask) != 0;
- i += kPointerSize) {
- __ push(r2);
- }
- }
-
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it never moves.
- masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
- masm->push(lr);
- masm->Jump(r5);
-
- // Restore sp back to before aligning the stack.
- if (alignment_before_call > 0) {
- __ add(sp, sp, Operand(alignment_before_call));
- }
-
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
- }
-
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
-
- // Exit C frame and return.
- // r0:r1: result
- // sp: stack pointer
- // fp: frame pointer
- __ LeaveExitFrame(mode_);
-
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
-
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ b(eq, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r3, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ ldr(r0, MemOperand(ip));
- __ str(r3, MemOperand(ip));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(r0, Operand(Factory::termination_exception()));
- __ b(eq, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
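The failure check above relies on failure objects having all kFailureTagMask bits set, so adding 1 clears exactly those bits; that is what the STATIC_ASSERT encodes. A sketch, assuming the era's kFailureTag == 3 / kFailureTagMask == 3 values:

#include <cstdint>

// Lower tag bits of (result + 1) are 0 iff result carries the failure tag.
inline bool IsFailureResult(uint32_t raw_result) {
  const uint32_t kFailureTagMask = 3;  // Assumed, per the STATIC_ASSERT.
  return ((raw_result + 1) & kFailureTagMask) == 0;
}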
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_);
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false,
- -kPointerSize);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false,
- 0);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true,
- kPointerSize);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, exit;
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
- __ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
-
- // Setup frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r5, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r5, MemOperand(ip));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ mov(ip, Operand(construct_entry));
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ mov(ip, Operand(entry));
- }
- __ ldr(ip, MemOperand(ip)); // deref address
-
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc.
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
- __ str(r3, MemOperand(ip));
- // No need to restore registers
- __ add(sp, sp, Operand(StackHandlerConstants::kSize));
-
-
- __ bind(&exit); // r0 holds result
- // Restore the top frame descriptors from the stack.
- __ pop(r3);
- __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
- __ str(r3, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
-}
-
-
-// This stub performs an instanceof, calling the builtin function if
-// necessary. Uses r0 for the object and r1 for the function that it may
-// be an instance of (both are fetched from the stack).
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - slow case for smis (we may need to throw an exception
- // depending on the rhs).
- Label slow, loop, is_instance, is_not_instance;
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ BranchOnSmi(r0, &slow);
-
- // Check that the left hand is a JS object and put map in r3.
- __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
- __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
- __ b(gt, &slow);
-
- // Get the prototype of the function (r4 is result, r2 is scratch).
- __ ldr(r1, MemOperand(sp, 0));
- // r1 is function, r3 is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ pop();
- __ pop();
- __ mov(pc, Operand(lr));
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(r1, r4, r2, &slow);
-
- // Check that the function prototype is a JS object.
- __ BranchOnSmi(r4, &slow);
- __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
- __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
- __ b(gt, &slow);
-
- __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
-
- // Register mapping: r3 is object map and r4 is function prototype.
- // Get prototype of object into r2.
- __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- __ bind(&loop);
- __ cmp(r2, Operand(r4));
- __ b(eq, &is_instance);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r2, ip);
- __ b(eq, &is_not_instance);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ pop();
- __ pop();
- __ mov(pc, Operand(lr)); // Return.
-
- __ bind(&is_not_instance);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ pop();
- __ pop();
- __ mov(pc, Operand(lr)); // Return.
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
-}
-
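Stripped of register allocation and the root-list cache (the StoreRoot/LoadRoot pairs that memoise the last function/map/answer triple), the loop above is an ordinary prototype-chain walk; note the inverted smi answers, 0 for "is an instance" and 1 for "is not". A toy C++ model with hypothetical Obj/Map types, not V8's:

struct Map;
struct Obj { Map* map; };
struct Map { Obj* prototype; };

// Returns 0 for "is an instance", 1 for "is not", matching the smi
// answers the stub stores in the instanceof cache.
int InstanceOfWalk(Obj* prototype_of_object, Obj* function_prototype,
                   Obj* null_object) {
  for (Obj* p = prototype_of_object; ; p = p->map->prototype) {
    if (p == function_prototype) return 0;  // is_instance
    if (p == null_object) return 1;         // is_not_instance
  }
}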
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- static const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ BranchOnNotSmi(r1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r0. Use unsigned comparison to get negative
- // check for free.
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the stack and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
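The element load above turns a smi key and smi argument count straight into an address: the smi difference r0 - r1 is already 2 * (argc - index), LSL (kPointerSizeLog2 - kSmiTagSize) scales that to byte units, and kDisplacement lands on the last parameter. In untagged terms (a sketch; kCallerSPOffset == 2 * kPointerSize is an assumption about the ARM frame layout):

#include <cstdint>

// Address of argument i in an argc-argument frame:
//   fp + kCallerSPOffset + (argc - 1 - i) * kPointerSize
uint32_t ArgumentAddress(uint32_t fp, int argc, int i) {
  const int kPointerSize = 4;
  const int kCallerSPOffset = 2 * kPointerSize;  // Assumed frame layout.
  return fp + kCallerSPOffset + (argc - 1 - i) * kPointerSize;
}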
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // Get the length from the frame.
- __ ldr(r1, MemOperand(sp, 0));
- __ b(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ cmp(r1, Operand(0));
- __ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4, offset));
-
- // Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
-
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ cmp(r1, Operand(0));
- __ b(eq, &done);
-
- // Get the parameters pointer from the stack.
- __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
-
- // Copy the fixed array slots.
- Label loop;
- // Setup r4 to point to the first array slot.
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- // Post-increment r4 with kPointerSize on each iteration.
- __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0));
- __ b(ne, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if the regexp entry in generated code is turned off by the
- // runtime flag.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the content of these registers are safe to use after the call.
- Register subject = r4;
- Register regexp_data = r5;
- Register last_match_info_elements = r6;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(r0, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r0, MemOperand(r0, 0));
- __ tst(r0, Operand(r0));
- __ b(eq, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(ne, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
- __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ b(ne, &runtime);
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ ldr(r2,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the assumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
- __ b(hi, &runtime);
-
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ tst(subject, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string.
- __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
-
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
- __ cmp(r0, r1);
- __ b(ne, &runtime);
- __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // Is first part a flat string?
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r0, Operand(kStringRepresentationMask));
- __ b(nz, &runtime);
-
- __ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
- __ and_(r0, r0, Operand(kStringEncodingMask));
- __ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
-
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains the hole.
- __ CompareObjectType(r7, r0, r0, CODE_TYPE);
- __ b(ne, &runtime);
-
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
- // r1: previous index
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
-
- static const int kRegExpExecuteArguments = 7;
- __ push(lr);
- __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
-
- // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
- __ mov(r0, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r2, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r2, MemOperand(r2, 0));
- __ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 5 (sp[0]): static offsets vector buffer.
- __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
- __ str(r0, MemOperand(sp, 0 * kPointerSize));
-
- // For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ eor(r3, r3, Operand(1));
- // Argument 4 (r3): End of string data
- // Argument 3 (r2): Start of string data
- __ add(r2, r9, Operand(r1, LSL, r3));
- __ add(r3, r9, Operand(r0, LSL, r3));
-
- // Argument 2 (r1): Previous index.
- // Already there
-
- // Argument 1 (r0): Subject string.
- __ mov(r0, subject);
-
- // Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r7, kRegExpExecuteArguments);
- __ pop(lr);
-
- // r0: result
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
-
- // Check the result.
- Label success;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
- __ b(eq, &success);
- Label failure;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
- __ b(eq, &failure);
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ b(ne, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code, but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerun the RegExp to get the stack overflow exception.
- __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ ldr(r1, MemOperand(r1, 0));
- __ cmp(r0, r1);
- __ b(eq, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(r0, Operand(Factory::null_value()));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ ldr(r1,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r1, r1, Operand(2)); // r1 was a smi.
-
- // r1: number of capture registers
- // r4: subject string
- // Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
- __ str(r2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
- __ mov(r2, Operand(address_of_static_offsets_vector));
-
- // r1: number of capture registers
- // r2: offsets vector
- Label next_capture, done;
- // The capture register counter starts from the number of capture registers
- // and counts down until wrapping after zero.
- __ add(r0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
- __ bind(&next_capture);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(mi, &done);
- // Read the value from the static offsets vector buffer.
- __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
- // Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
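A note on the call sequence above: PrepareCallCFunction and the three stores follow the ARM AAPCS calling convention, where the first four integer arguments are passed in r0-r3 and the rest go on the stack, so only arguments 5 through 7 need the slots at sp[0], sp[4] and sp[8]. A minimal sketch of the callee's shape; the prototype and parameter names are illustrative assumptions, not V8's actual declaration of the generated irregexp entry point:

class String;  // stand-in for v8::internal::String

// Hypothetical prototype, for illustrating the register/stack split only.
int RegExpExecute(String* subject,             // argument 1 -> r0
                  int previous_index,          // argument 2 -> r1
                  const unsigned char* start,  // argument 3 -> r2
                  const unsigned char* end,    // argument 4 -> r3
                  int* static_offsets_vector,  // argument 5 -> sp[0]
                  unsigned char* stack_base,   // argument 6 -> sp[4]
                  int direct_call);            // argument 7 -> sp[8]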
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- Label receiver_is_value, receiver_is_js_object;
- __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ BranchOnSmi(r1, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(r1);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ LeaveInternalFrame();
- __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ BranchOnSmi(r1, &slow);
- // Get the map of the function object.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
- const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
- const char* strict_name = "";
- if (strict_ && (cc_ == eq || cc_ == ne)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s%s",
- cc_name,
- lhs_name,
- rhs_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the parameters in a unique 16 bit value. To avoid duplicate stubs
- // the never-NaN-NaN condition is only taken into account if the condition is
- // equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
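For reference, a minimal sketch of the packing MinorKey() performs, using plain shifts instead of V8's BitField templates; the field offsets are illustrative assumptions, chosen only to show how each distinct stub configuration yields a distinct cache key:

#include <cstdint>

// Illustrative layout: 12 condition bits plus four flag bits = 16 bits.
// V8's BitField classes define the real offsets and widths.
uint32_t EncodeCompareMinorKey(uint32_t condition_bits,  // cc_ >> 28
                               bool lhs_is_r0,
                               bool strict,
                               bool never_nan_nan,
                               bool include_number_compare) {
  return (condition_bits << 4) |
         (static_cast<uint32_t>(lhs_is_r0) << 3) |
         (static_cast<uint32_t>(strict) << 2) |
         (static_cast<uint32_t>(never_nan_nan) << 1) |
         static_cast<uint32_t>(include_number_compare);
}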
-
-
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- __ BranchOnSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ tst(result_, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ BranchOnNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
- __ b(ls, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
-
- // Handle non-flat strings.
- __ tst(result_, Operand(kIsConsStringMask));
- __ b(eq, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(nz, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ tst(result_, Operand(kStringEncodingMask));
- __ b(nz, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- scratch_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- true);
- call_helper.BeforeCall(masm);
- __ Push(object_, index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ BranchOnNotSmi(scratch_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ascii char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(eq, &slow_case_);
- __ bind(&exit_);
-}
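The tst above folds the smi check and the range check into a single mask test: because kMaxAsciiCharCode + 1 is a power of two (hence the ASSERT), a value is a smi-tagged ASCII char code exactly when the tag bit and every payload bit above kMaxAsciiCharCode are clear. A C++ rendering of the same predicate, assuming V8's one-bit smi tag:

#include <cstdint>

const uint32_t kSmiTagSize = 1;                  // one-bit smi tag assumed
const uint32_t kSmiTagMask = (1u << kSmiTagSize) - 1;
const uint32_t kMaxAsciiCharCode = 0x7f;

// True iff value is a smi-tagged char code in [0, kMaxAsciiCharCode].
bool IsSmiTaggedAsciiCharCode(uint32_t value) {
  uint32_t mask = kSmiTagMask | (~kMaxAsciiCharCode << kSmiTagSize);
  return (value & mask) == 0;                    // one AND, one compare
}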
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0));
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform the sub between the load and the dependent store to give the load
- // time to complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // Loop again unless this was the last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
- }
-
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0));
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // The shift register holds the number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- // Store the "shift" most significant bits of scratch in the least
- // significant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes are left to copy.
- // Compare to eight, because we did the subtract before increasing dest.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
- __ b(ge, &loop);
- }
- // There is now between zero and three bytes left to copy (negative that
- // number is in scratch5), and between one and three bytes already read into
- // scratch1 (eight times that number in scratch4). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
- // Move the minimum of bytes read and bytes left to copy to scratch5.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
- // scratch ready to write.
- __ cmp(scratch5, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dst, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- // Compare to 8, not 4, because we do the subtraction before increasing
- // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
- __ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
-
- __ bind(&done);
-}
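A compact C++ sketch of the complex loop's core idea, under the same assumptions the stub makes: little-endian layout, a word-aligned destination, and permission to read one aligned word past the data because heap objects are padded to kObjectAlignment. The 0-3 trailing bytes the stub handles after its loop are omitted:

#include <cstdint>

// Copy `words` 32-bit words to an aligned dest from a src that is
// misaligned by 1-3 bytes, using only aligned loads and stores.
void CopyShiftCombine(uint32_t* dest, const uint8_t* src, int words) {
  uint32_t mis = reinterpret_cast<uintptr_t>(src) & 3;   // 1..3 assumed
  const uint32_t* aligned_src =
      reinterpret_cast<const uint32_t*>(src - mis);      // round down
  uint32_t right_shift = 8 * mis;          // bits of each word to drop
  uint32_t left_shift = 32 - right_shift;  // bits carried to next store
  uint32_t carry = *aligned_src++ >> right_shift;        // preload
  for (int i = 0; i < words; i++) {
    uint32_t next = *aligned_src++;
    dest[i] = carry | (next << left_shift);  // splice two source words
    carry = next >> right_shift;
  }
}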
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If the check failed, combine both characters into a single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in the c1 register.
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load symbol table
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- // Load undefined value
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ add(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- for (int i = 0; i < kProbes; i++) {
- Register candidate = scratch5; // Scratch register contains candidate.
-
- // Calculate entry in symbol table.
- if (i > 0) {
- __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_symbol_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- __ cmp(candidate, undefined);
- __ b(eq, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check that the candidate is a non-external ascii string.
- __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
- &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ Move(r0, result);
-}
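A sketch of the probing scheme with stand-in types; the Slot shape and the GetProbeOffset formula are assumptions for illustration, not SymbolTable's real layout. Up to kProbes slots are inspected; an undefined slot proves the string is absent, and a candidate must have length 2 and match both characters (the real code also verifies it is a sequential ASCII string):

#include <cstdint>

struct Slot { bool is_undefined; int length; uint16_t first_two_chars; };

// Assumed quadratic probe sequence; a stand-in for the real helper.
static uint32_t GetProbeOffset(int n) { return (n + n * n) >> 1; }

const Slot* ProbeTwoCharSymbol(const Slot* table, uint32_t capacity_mask,
                               uint32_t hash, uint16_t chars) {
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + GetProbeOffset(i)) & capacity_mask;
    const Slot& candidate = table[index];
    if (candidate.is_undefined) return nullptr;   // definitely absent
    if (candidate.length == 2 && candidate.first_two_chars == chars)
      return &candidate;                          // found it
  }
  return nullptr;  // not found by probing; may still be in the table
}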
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ add(hash, character, Operand(character, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, ASR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, nz);
-}
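Taken together, the three helpers compute a Jenkins-style one-at-a-time hash. Below is a plain C++ reference for a two-character string; it uses unsigned logical shifts where the assembly uses ASR, so treat it as a sketch of the computation's shape rather than a bit-exact oracle when intermediate values have the top bit set:

#include <cstdint>

// Reference rendering of GenerateHashInit / AddCharacter / GetHash.
uint32_t TwoCharHash(uint8_t c1, uint8_t c2) {
  uint32_t hash = c1 + (static_cast<uint32_t>(c1) << 10);  // init
  hash ^= hash >> 6;
  hash += c2;                        // add second character
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                 // finalize
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;          // zero is reserved
  return hash;
}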
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
-
-
- // Check bounds and smi-ness.
- __ ldr(r7, MemOperand(sp, kToOffset));
- __ ldr(r6, MemOperand(sp, kFromOffset));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r7, ASR, 1), SetCC);
- __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
- // If either "to" or "from" had the smi tag bit set, then the carry flag is
- // set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
- __ b(mi, &runtime); // From is negative.
-
- __ sub(r2, r2, Operand(r3), SetCC);
- __ b(mi, &runtime); // Fail if from > to.
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache.
- __ cmp(r2, Operand(2));
- __ b(lt, &runtime);
-
- // r2: length
- // r3: from index (untagged smi)
- // r6: from (smi)
- // r7: to (smi)
-
- // Make sure first argument is a sequential (or flat) string.
- __ ldr(r5, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r5, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- Condition is_string = masm->IsObjectStringType(r5, r1);
- __ b(NegateCondition(is_string), &runtime);
-
- // r1: instance type
- // r2: length
- // r3: from index (untagged smi)
- // r5: string
- // r6: from (smi)
- // r7: to (smi)
- Label seq_string;
- __ and_(r4, r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag < kConsStringTag);
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- __ cmp(r4, Operand(kConsStringTag));
- __ b(gt, &runtime); // External strings go to runtime.
- __ b(lt, &seq_string); // Sequential strings are handled directly.
-
- // Cons string. Try to recurse (once) on the first substring.
- // (This adds a little more generality than necessary to handle flattened
- // cons strings, but not much).
- __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
- __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ tst(r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ b(ne, &runtime); // Cons and External strings go to runtime.
-
- // Definitely a sequential string.
- __ bind(&seq_string);
-
- // r1: instance type.
- // r2: length
- // r3: from index (untagged smi)
- // r5: string
- // r6: from (smi)
- // r7: to (smi)
- __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
- __ cmp(r4, Operand(r7));
- __ b(lt, &runtime); // Fail if to > length.
-
- // r1: instance type.
- // r2: result string length.
- // r3: from index (untagged smi)
- // r5: string.
- // r6: from offset (smi)
- // Check for flat ascii string.
- Label non_ascii_flat;
- __ tst(r1, Operand(kStringEncodingMask));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ b(eq, &non_ascii_flat);
-
- Label result_longer_than_two;
- __ cmp(r2, Operand(2));
- __ b(gt, &result_longer_than_two);
-
- // Sub string of length 2 requested.
- // Get the two characters forming the sub string.
- __ add(r5, r5, Operand(r3));
- __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
- __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // r2: result string length.
- // r3: two characters combined into halfword in little endian byte order.
- __ bind(&make_two_character_string);
- __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
- __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&result_longer_than_two);
-
- // Allocate the result.
- __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
-
- // r0: result string.
- // r2: result string length.
- // r5: string.
- // r6: from offset (smi)
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r6, ASR, 1));
-
- // r0: result string.
- // r1: first character of result string.
- // r2: result string length.
- // r5: first character of sub string to copy.
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_flat);
- // r2: result string length.
- // r5: string.
- // r6: from offset (smi)
- // Check for flat two byte string.
-
- // Allocate the result.
- __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
-
- // r0: result string.
- // r2: result string length.
- // r5: string.
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // As "from" is a smi it is twice the index, which matches the size of a
- // two-byte character.
- __ add(r5, r5, Operand(r6));
-
- // r0: result string.
- // r1: first character of result.
- // r2: result length.
- // r5: first character of string to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
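The bounds checks at the top of this stub lean on a compact smi idiom: with a one-bit tag, ASR #1 simultaneously un-tags a value and shifts the tag bit into the carry flag, and the second (conditional) shift also sets the N flag for the mi branch. Modelled in C++, with the carry flag as a returned field:

#include <cstdint>

struct Untagged { int32_t value; bool tag_bit; };

// ASR #1 with SetCC: the result is the untagged value; the shifted-out
// bit (the smi tag) is what the carry flag captures on ARM.
Untagged UntagSmiWithCarry(int32_t tagged) {
  return Untagged{ tagged >> 1, (tagged & 1) != 0 };
}

If either operand's tag_bit is set the stub falls back to the runtime, and a negative untagged "from" is caught by the mi branch.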
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(min_length, Operand(min_length));
- __ b(eq, &compare_lengths);
-
- // Untag smi.
- __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
-
- // Set up the registers so that we only need to increment one register
- // in the loop.
- __ add(scratch2, min_length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch2));
- __ add(right, right, Operand(scratch2));
- // Registers left and right now point one past the last character to compare.
- __ rsb(min_length, min_length, Operand(-1));
- Register index = min_length;
- // Index starts at -min_length.
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ add(index, index, Operand(1), SetCC);
- __ ldrb(scratch2, MemOperand(left, index), ne);
- __ ldrb(scratch4, MemOperand(right, index), ne);
- // Skip to compare lengths with eq condition true.
- __ b(eq, &compare_lengths);
- __ cmp(scratch2, scratch4);
- __ b(eq, &loop);
- // Fallthrough with eq condition false.
- }
- // Compare lengths; the strings are equal up to min_length.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // If the characters compared equal, use length_delta as the result.
- __ mov(r0, Operand(length_delta), SetCC, eq);
- // Fall through to here if characters compare not-equal.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
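The loop above keeps a single negative index that counts up toward zero, so only one register is incremented per iteration; both string pointers are first advanced one past the compared region. An equivalent C++ rendering, returning -1/0/1 where the stub materializes LESS, EQUAL and GREATER smis:

#include <cstdint>

int CompareFlatAscii(const uint8_t* left, int left_len,
                     const uint8_t* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  const uint8_t* left_end = left + min_length;    // one past the region
  const uint8_t* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {
    if (left_end[index] != right_end[index])
      return left_end[index] < right_end[index] ? -1 : 1;
  }
  // Equal up to min_length: the length difference decides.
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;
}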
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
-
- // Compare flat ascii strings natively. Remove arguments from stack first.
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
- // Stack on entry:
- // sp[0]: second argument.
- // sp[4]: first argument.
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Strings whose combined length is exactly two get special handling: the
- // symbol table is probed for the result before allocating a new string.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- if (!string_check_) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &string_add_runtime);
-
- // Get the two characters forming the new string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
- // The resulting string has length 2, and the first characters of the two
- // strings are combined into a single halfword in the r2 register. This
- // lets us fill the resulting string with a single halfword store
- // instruction (which assumes that the processor is in little-endian mode)
- // instead of two loops.
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(String::kMinNonFlatLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &string_add_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ascii the result is an ascii cons string.
- if (!string_check_) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are
- // sequential and that they have the same encoding.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
- // r6: sum of lengths.
- __ bind(&string_add_flat_result);
- if (!string_check_) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- // Check that both strings are sequential.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- __ tst(r5, Operand(kStringRepresentationMask), eq);
- __ b(ne, &string_add_runtime);
- // Now check if both strings have the same encoding (ASCII/Two-byte).
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths.
- Label non_ascii_string_add_flat_result;
- ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &string_add_runtime);
- // And see if it's ASCII or two-byte.
- __ tst(r4, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- // Both strings are sequential ASCII strings. We also know that they are
- // short (since the sum of the lengths is less than kMinNonFlatLength).
- // r6: length of resulting flat string
- __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r0: first character of first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
-
- // Load second argument and locate first character.
- __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are sequential two byte strings.
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of the lengths of the strings.
- __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r7: result string.
-
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: first character of first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
-
- // Locate first character of second argument.
- __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result (after copy of first string).
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
-
- __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
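The stub's control flow amounts to a length-driven choice of strategy. A sketch of that decision, using V8's constant names but an illustrative function shape:

enum AddStrategy { RETURN_OTHER, TWO_CHAR_SYMBOL, FLAT_COPY, CONS_STRING,
                   CALL_RUNTIME };

AddStrategy ChooseAddStrategy(int left_len, int right_len,
                              int kMinNonFlatLength, int kMaxLength) {
  if (left_len == 0 || right_len == 0) return RETURN_OTHER;
  int sum = left_len + right_len;        // cannot overflow, see the assert
  if (sum == 2) return TWO_CHAR_SYMBOL;  // probe the symbol table first
  if (sum < kMinNonFlatLength) return FLAT_COPY;
  if (sum > kMaxLength) return CALL_RUNTIME;  // exceptionally long
  return CONS_STRING;                    // defer the copy
}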
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 029d59900d..162d97fd98 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -28,8 +28,9 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "ic-inl.h"
#include "ast.h"
+#include "code-stubs-arm.h"
+#include "ic-inl.h"
namespace v8 {
namespace internal {
@@ -270,15 +271,13 @@ class CodeGenerator: public AstVisitor {
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- static const int kUnknownIntValue = -1;
-
// If the name is an inline runtime function call return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- return FLAG_debug_code ? 27 : 13;
+ return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
@@ -420,7 +419,8 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
- int known_rhs = kUnknownIntValue);
+ int known_rhs =
+ GenericBinaryOpStub::kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@@ -455,9 +455,6 @@ class CodeGenerator: public AstVisitor {
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
- static bool PatchInlineRuntimeEntry(Handle<String> name,
- const InlineRuntimeLUT& new_entry,
- InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -528,6 +525,8 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+ void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
@@ -548,6 +547,9 @@ class CodeGenerator: public AstVisitor {
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -610,510 +612,6 @@ class CodeGenerator: public AstVisitor {
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
-class GenericBinaryOpStub : public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = CodeGenerator::kUnknownIntValue)
- : op_(op),
- mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) { }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
-
- // Minor key encoding in 17 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 2> {};
- class RegisterBits: public BitField<bool, 10, 1> {};
- class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- // Encode the parameters in a unique 17 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(r0));
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
-
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
- }
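A round-trip sketch of the known-RHS encoding: constants 0..10 map to keys 1..11, powers of two above 10 map to 12 plus their log2, and key 0 means no known constant. This mirrors MinorKeyForKnownInt and KnownBitsForMinorKey above:

// Caller guarantees the constant is encodable (<= 10 or a power of two).
int EncodeKnownInt(int constant) {
  if (constant <= 10) return constant + 1;
  int key = 12;
  while ((constant & 1) == 0) { key++; constant >>= 1; }
  return key;
}

int DecodeKnownInt(int key) {
  if (key == 0) return 0;          // no known constant
  if (key <= 11) return key - 1;   // small constants
  return 1 << (key - 12);          // powers of two
}
// e.g. DecodeKnownInt(EncodeKnownInt(1024)) == 1024.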
-
- Register LhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r0 : r1;
- }
-
- Register RhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r1 : r0;
- }
-
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing, a jump to the label not_found is performed. This
- // jump does not guarantee that the string is not in the symbol table. If
- // the string is found, the code falls through with the string in register
- // r0. The contents of both the c1 and c2 registers are modified. At the
- // exit c1 is guaranteed to contain a halfword with the low and high bytes
- // equal to the initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns the result in r0.
- // Does not use the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
- // This stub can do a fast mod operation without using floating point.
- // It is tail-called from the GenericBinaryOpStub and it always
- // returns an answer. It never causes GC so it doesn't need a real frame.
-//
-// The inputs are always positive Smis. This is never called
-// where the denominator is a power of 2. We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register. It is always in the range
-// of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-// shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-// register.
-class IntegerModStub : public CodeStub {
- public:
- IntegerModStub(Register result,
- Register shift_distance,
- Register odd_number,
- Register mask_bits,
- Register lhs,
- Register scratch)
- : result_(result),
- shift_distance_(shift_distance),
- odd_number_(odd_number),
- mask_bits_(mask_bits),
- lhs_(lhs),
- scratch_(scratch) {
- // We don't code these in the minor key, so they should always be the same.
- // We don't really want to fix that since this stub is rather large and we
- // don't want many copies of it.
- ASSERT(shift_distance_.is(r9));
- ASSERT(odd_number_.is(r4));
- ASSERT(mask_bits_.is(r3));
- ASSERT(scratch_.is(r5));
- }
-
- private:
- Register result_;
- Register shift_distance_;
- Register odd_number_;
- Register mask_bits_;
- Register lhs_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class ResultRegisterBits: public BitField<int, 0, 4> {};
- class LhsRegisterBits: public BitField<int, 4, 4> {};
-
- Major MajorKey() { return IntegerMod; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return ResultRegisterBits::encode(result_.code())
- | LhsRegisterBits::encode(lhs_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "IntegerModStub"; }
-
- // Utility functions.
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry);
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry);
- void ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs);
- void ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator);
- void ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits);
-
-
-#ifdef DEBUG
- void Print() { PrintF("IntegerModStub\n"); }
-#endif
-};
-
-
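
The register layout described above encodes a simple arithmetic identity: with the denominator written as d = odd * 2^shift, the low shift bits of the numerator pass straight through to the answer, and only the shifted-down part needs reducing modulo the odd factor. A small C++ check of the arithmetic (not of the generated code):

#include <cstdint>

uint32_t ModByOddTimesPowerOfTwo(uint32_t numerator, uint32_t odd, int shift) {
  uint32_t mask_bits = numerator & ((1u << shift) - 1);  // copied verbatim
  uint32_t lhs = numerator >> shift;                     // reduced mod 'odd'
  return ((lhs % odd) << shift) | mask_bits;
}
// For any numerator n: n % (odd << shift) == ModByOddTimesPowerOfTwo(n, odd, shift).
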
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
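
The BitField classes used by this and the surrounding stubs pack a small integer at a fixed bit position of the minor key. A minimal sketch of the shape of such a template; this is an assumption for illustration, the real one also carries validity checks:

#include <cstdint>

template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// MinorKey() above then just ORs three 4-bit register codes together:
//   IntRegisterBits::encode(i) | HeapNumberRegisterBits::encode(h)
//       | ScratchRegisterBits::encode(s)
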
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the object register is found in the cache, the generated code falls
- // through with the result in the result register. The object and result
- // registers can be the same. If the number is not found in the cache, the
- // code jumps to the not_found label with the contents of the object
- // register unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
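
The cache lookup described above can be pictured as a flat array of (number, string) pairs indexed by a masked hash of the number. A sketch under that assumed layout:

#include <cstdint>
#include <cstddef>

struct NumberStringCacheSketch {
  const int32_t* pairs;  // slot 2*i holds the number, slot 2*i + 1 the string
  size_t capacity;       // number of pairs, assumed to be a power of two

  // Returns the value slot index, or -1 to signal the not_found jump.
  int Lookup(int32_t smi_key) const {
    size_t index = static_cast<uint32_t>(smi_key) & (capacity - 1);
    if (pairs[2 * index] == smi_key) {
      return static_cast<int>(2 * index + 1);  // fall through with result
    }
    return -1;  // not_found: the object register is left unchanged
  }
};
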
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register offset, Register scratch)
- : object_(object), offset_(offset), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register offset_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
- " (scratch reg %d)\n",
- object_.code(), offset_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits. 4 bits for each of the three
- // registers (object, offset and scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField<uint32_t, 0, 4> {};
- class OffsetBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- OffsetBits::encode(offset_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 2ac9a41326..b2b5cb56b0 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -194,6 +194,13 @@ enum SoftwareInterruptCodes {
};
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision {
+ kSinglePrecision = 0,
+ kDoublePrecision = 1
+};
+
+
typedef int32_t instr_t;
@@ -269,6 +276,15 @@ class Instr {
inline int VCField() const { return Bit(8); }
inline int VAField() const { return Bits(23, 21); }
inline int VBField() const { return Bits(6, 5); }
+ inline int VFPNRegCode(VFPRegPrecision pre) {
+ return VFPGlueRegCode(pre, 16, 7);
+ }
+ inline int VFPMRegCode(VFPRegPrecision pre) {
+ return VFPGlueRegCode(pre, 0, 5);
+ }
+ inline int VFPDRegCode(VFPRegPrecision pre) {
+ return VFPGlueRegCode(pre, 12, 22);
+ }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -343,6 +359,17 @@ class Instr {
static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
private:
+ // Join split register codes, depending on single or double precision.
+ // four_bit is the position of the least-significant bit of the four-bit
+ // specifier. one_bit is the position of the additional single-bit
+ // specifier.
+ inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
+ if (pre == kSinglePrecision) {
+ return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+ }
+ return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+ }
+
// We need to prevent the creation of instances of class Instr.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
};
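
The split encodings joined by VFPGlueRegCode can be checked in plain C++: single-precision S registers carry the extra bit as the least-significant bit of the register code, while double-precision D registers carry it as the most-significant bit. A sketch operating on a raw instruction word:

#include <cstdint>

int VFPGlueRegCodeSketch(bool single_precision, uint32_t instr,
                         int four_bit, int one_bit) {
  int four = (instr >> four_bit) & 0xF;  // the four-bit field
  int one = (instr >> one_bit) & 0x1;    // the extra bit
  return single_precision ? (four << 1) | one   // S0..S31
                          : (one << 4) | four;  // D0..D31
}
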
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 3a948451b4..8128f7deaf 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -130,41 +130,58 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
+ RegList object_regs,
+ RegList non_object_regs) {
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- // Use sp as base to push.
- __ CopyRegistersFromMemoryToStack(sp, pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be left untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ tst(reg, Operand(0xc0000000));
+ __ Assert(eq, "Unable to encode value as smi");
+ }
+ __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+ }
+ }
+ __ stm(db_w, sp, object_regs | non_object_regs);
+ }
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0)); // no arguments
+ __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
- // Restore the register values containing object pointers from the expression
- // stack in the reverse order from that in which they were pushed.
- // Use sp as base to pop.
- __ CopyRegistersFromStackToMemory(sp, r3, pointer_regs);
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ ldm(ia_w, sp, object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ }
+ if (FLAG_debug_code &&
+ (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
+ }
+ }
__ LeaveInternalFrame();
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
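
The smi tagging in this helper is what lets raw values survive on the expression stack: with a zero tag in the low bit, a left-shifted integer looks like a smi, so the GC leaves it alone. A sketch assuming the 32-bit layout with kSmiTagSize == 1:

#include <cstdint>

int32_t TagForDebugBreak(int32_t raw) {
  // The Assert above checks that the top two bits are clear, so the
  // shift cannot change or overflow the value.
  return raw << 1;
}

int32_t UntagAfterDebugBreak(int32_t tagged) {
  return tagged >> 1;  // LSR in the generated code; same result here
}
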
@@ -184,7 +201,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -----------------------------------
// Registers r0 and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
}
@@ -198,7 +215,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -----------------------------------
// Registers r0, r1, and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
}
@@ -206,9 +223,8 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
- // -- sp[0] : key
- // -- sp[4] : receiver
- Generate_DebugBreakCallHelper(masm, r0.bit());
+ // -- r1 : receiver
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
}
@@ -218,31 +234,24 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
- // -- r0: number of arguments
- // -- r1: receiver
- // -- lr: return address
+ // -- r2 : name
// -----------------------------------
- // Register r1 contains an object that needs to be pushed on the expression
- // stack of the fake JS frame. r0 is the actual number of arguments not
- // encoded as a smi, therefore it cannot be on the expression stack of the
- // fake JS frame as it can easily be an invalid pointer (e.g. 1). r0 will be
- // pushed on the stack of the C frame and restored from there.
- Generate_DebugBreakCallHelper(masm, r1.bit());
+ Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r0.bit());
+ // Calling convention for construct call (from builtins-arm.cc)
+ // -- r0 : number of arguments (not smi)
+ // -- r1 : constructor function
+ Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
}
@@ -250,7 +259,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
- Generate_DebugBreakCallHelper(masm, r0.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
@@ -258,7 +267,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0);
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
@@ -280,7 +289,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
- Generate_DebugBreakCallHelper(masm, 0);
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 0029ed168b..5122f437b9 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -463,7 +463,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT((width + lsb) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d",
+ "%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
}
@@ -931,7 +931,7 @@ void Decoder::DecodeType3(Instr* instr) {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, 'imm05@16, 'rm'shift_sat");
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
} else {
UNREACHABLE(); // SSAT.
}
@@ -1269,17 +1269,19 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
+ case 0xA:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
+ Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
+ Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
}
break;
case 0xC:
+ case 0xE:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
+ Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
+ Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
}
break;
default:
@@ -1300,16 +1302,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
break;
case 0x8:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
}
break;
case 0xC:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
}
break;
default:
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 271e4a6f0a..47434392df 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -37,87 +37,20 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
- Address sp = fp + ExitFrameConstants::kSPDisplacement;
- const int offset = ExitFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp + offset);
- bool is_debug_exit = code->IsSmi();
- if (is_debug_exit) {
- sp -= kNumJSCallerSaved * kPointerSize;
- }
+ Address sp = fp + ExitFrameConstants::kSPOffset;
+
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
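
The removed GetCallerStackPointer variants all computed the caller's stack pointer as a fixed offset above the frame pointer plus one slot per argument, with one extra slot for the receiver on JavaScript frames. A sketch; the kCallerSPOffset value below is an assumption for illustration:

#include <cstdint>

uintptr_t JsFrameCallerSP(uintptr_t fp, int formal_parameter_count) {
  const int kPointerSize = 4;                    // 32-bit ARM
  const int kCallerSPOffset = 2 * kPointerSize;  // assumed frame layout
  return fp + kCallerSPOffset +
         (formal_parameter_count + 1) * kPointerSize;  // +1 for the receiver
}
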
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 4924c1aeb9..5847a6a2a0 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -96,11 +96,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- // Exit frames have a debug marker on the stack.
- static const int kSPDisplacement = -1 * kPointerSize;
-
- // The debug marker is just above the frame pointer.
static const int kCodeOffset = -1 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index b58a4a5854..f32da6dfb1 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -245,6 +246,13 @@ void FullCodeGenerator::EmitReturnSequence() {
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ return kNoConstants;
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -266,16 +274,10 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
}
break;
- case Expression::kValueTest:
- case Expression::kTestValue:
- // Push an extra copy of the value in case it's needed.
- __ push(reg);
- // Fall through.
-
case Expression::kTest:
- // We always call the runtime on ARM, so push the value as argument.
- __ push(reg);
- DoTest(context);
+ // For simplicity we always test the accumulator register.
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -290,8 +292,6 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// On ARM we have to move the value into a register to do anything
// with it.
Move(result_register(), slot);
@@ -310,8 +310,6 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
// Nothing to do.
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// On ARM we have to move the value into a register to do anything
// with it.
__ mov(result_register(), Operand(lit->handle()));
@@ -340,15 +338,9 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) {
}
break;
- case Expression::kValueTest:
- case Expression::kTestValue:
- // Duplicate the value on the stack in case it's needed.
- __ ldr(ip, MemOperand(sp));
- __ push(ip);
- // Fall through.
-
case Expression::kTest:
- DoTest(context);
+ __ pop(result_register());
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -381,54 +373,9 @@ void FullCodeGenerator::DropAndApply(int count,
break;
case Expression::kTest:
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (count == 1) {
- __ str(reg, MemOperand(sp));
- __ push(reg);
- } else { // count > 1
- __ Drop(count - 2);
- __ str(reg, MemOperand(sp, kPointerSize));
- __ str(reg, MemOperand(sp));
- }
- DoTest(context);
- break;
- }
-}
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -471,34 +418,6 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- break;
- case kStack:
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -527,103 +446,40 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
break;
}
case Expression::kTest:
- __ b(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) {
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- }
- break;
- }
- __ b(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- }
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ b(false_label_);
}
- __ b(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is pushed on the stack, and duplicated on the stack
- // if necessary (for value/test and test/value contexts).
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Call the runtime to find the boolean value of the source and then
// translate it into control flow to the pair of labels.
+ __ push(result_register());
__ CallRuntime(Runtime::kToBool, 1);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
+ Split(eq, if_true, if_false, fall_through);
+}
- // Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
-
- case Expression::kTest:
- __ b(eq, true_label_);
- __ jmp(false_label_);
- break;
-
- case Expression::kValueTest: {
- Label discard;
- switch (location_) {
- case kAccumulator:
- __ b(ne, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ b(eq, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- switch (location_) {
- case kAccumulator:
- __ b(eq, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ b(ne, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ b(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ b(NegateCondition(cc), if_false);
+ } else {
+ __ b(cc, if_true);
+ __ b(if_false);
}
}
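
Split exists so a test never emits a branch to a label that is about to be bound anyway. The three shapes it produces for a condition eq with labels t and f:

//   Split(eq, &t, &f, &f);   // emits: beq t        (false case falls through)
//   Split(eq, &t, &f, &t);   // emits: bne f        (true case falls through)
//   Split(eq, &t, &f, NULL); // emits: beq t; b f   (no fall-through)
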
@@ -816,22 +672,23 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
+ // Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
- __ cmp(r1, r0);
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
-
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ orr(r2, r1, r0);
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target()->entry_label());
__ bind(&slow_case);
+ }
+
CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
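
The inlined smi case above leans on a standard tag trick: with a zero tag bit, ORing the two words and testing the tag bit checks both operands at once. A sketch, assuming the 32-bit layout:

#include <cstdint>

bool BothSmis(uint32_t a, uint32_t b) {
  const uint32_t kSmiTagMask = 1;
  return ((a | b) & kSmiTagMask) == 0;  // any heap pointer sets the low bit
}
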
@@ -1107,28 +964,33 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r2 = RegExp pattern
// r1 = RegExp flags
// r0 = temp + materialized value (RegExp literal)
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ ldr(r0, FieldMemOperand(r4, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
__ b(ne, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in r0.
__ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
__ push(r0);
__ mov(r0, Operand(Smi::FromInt(size)));
__ push(r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+
// After this, registers are used as follows:
// r0: Newly allocated regexp.
- // r1: Materialized regexp
+ // r1: Materialized regexp.
// r2: temp.
__ pop(r1);
__ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
@@ -1223,12 +1085,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_elements()));
__ Push(r3, r2, r1);
- if (expr->depth() > 1) {
+ if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ } else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
@@ -1283,10 +1151,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1297,58 +1166,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
- // We need the key and receiver on both the stack and in r0 and r1.
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(r0); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1389,10 +1270,23 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ ASSERT(constant == kNoConstants); // Only handled case.
+ EmitBinaryOp(op, context, mode);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
+ Expression::Context context,
+ OverwriteMode mode) {
__ pop(r1);
- GenericBinaryOpStub stub(op, NO_OVERWRITE, r1, r0);
+ GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
Apply(context, r0);
}
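
EmitInlineSmiBinaryOp punts to the generic stub for now, but the smi fast path it is a placeholder for rests on tagged arithmetic: with a zero one-bit tag, machine addition on the tagged words is addition on the payloads, and only signed overflow needs the stub. A sketch using a GCC/Clang builtin in place of the SetCC/vs branch pair:

#include <cstdint>

// Returns false when the generic stub must be called instead.
bool SmiAdd(int32_t a_tagged, int32_t b_tagged, int32_t* result) {
  return !__builtin_add_overflow(a_tagged, b_tagged, result);
}
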
@@ -1821,12 +1715,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
+
+ // Push the constructor on the stack. If it's not a function, it's used as
+ // the receiver for CALL_NON_FUNCTION; otherwise the value on the stack is
+ // ignored.
VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1838,16 +1732,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into r1 and r0.
+ // Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
- // Function is in sp[arg_count + 1].
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in r0, or pop it.
- DropAndApply(1, context_, r0);
+ Apply(context_, r0);
}
@@ -1859,7 +1750,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_true);
__ b(if_false);
@@ -1876,11 +1769,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1894,7 +1788,10 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
__ BranchOnSmi(r0, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r0, ip);
@@ -1908,8 +1805,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, if_false);
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, if_true);
- __ b(if_false);
+ Split(le, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1923,12 +1819,13 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(ge, if_true);
- __ b(if_false);
+ Split(ge, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1942,14 +1839,15 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_true);
- __ b(if_false);
+ Split(ne, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1965,7 +1863,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -1983,12 +1883,13 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2002,12 +1903,13 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2021,12 +1923,13 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2039,7 +1942,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2055,8 +1960,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2072,12 +1976,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(r1);
__ cmp(r0, r1);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2227,7 +2132,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand(0, RelocInfo::NONE));
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
@@ -2634,6 +2539,35 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
+
+ __ b(eq, if_true);
+ __ b(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kAccumulator);
+ __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ IndexFromHash(r0, r0);
+ Apply(context_, r0);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2738,19 +2672,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2762,42 +2684,19 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ push(r0);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, r0);
break;
@@ -2818,9 +2717,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2834,28 +2731,26 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register r0.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register r0.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- __ BranchOnSmi(result_register(), &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label call_stub;
+ __ BranchOnNotSmi(r0, &call_stub);
+ __ mvn(r0, Operand(r0));
+ // Bit-clear inverted smi-tag.
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ __ b(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ b(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ mvn(result_register(), Operand(result_register()));
- // Bit-clear inverted smi-tag.
- __ bic(result_register(), result_register(), Operand(kSmiTagMask));
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, r0);
break;
}
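
The inline BIT_NOT path above works because bitwise NOT commutes with the smi encoding once the inverted tag bit is cleared: for smi(v) = v << 1, (~(v << 1)) & ~1 equals (~v) << 1. A sketch, assuming a one-bit zero tag:

#include <cstdint>

int32_t SmiBitNot(int32_t tagged) {
  int32_t inverted = ~tagged;  // mvn
  return inverted & ~1;        // bic: clear the inverted tag bit
}
// SmiBitNot(v << 1) == (~v) << 1 for every v representable as a smi.
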
@@ -2867,6 +2762,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
@@ -2931,8 +2828,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -2955,7 +2850,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
int count_value = expr->op() == Token::INC ? 1 : -1;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
@@ -3020,68 +2915,126 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(r0);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ mov(r0, Operand(proxy->name()));
+ __ Push(cp, r0);
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
}
}
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmp(obj, null_const);
- if (strict) {
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ if (check->Equals(Heap::number_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
__ b(eq, if_true);
- } else {
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r0, ip);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ // Check for undetectable objects => false.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, if_false);
+ __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
+ Split(lt, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
__ b(eq, if_true);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(obj, ip);
+ __ cmp(r0, ip);
__ b(eq, if_true);
- __ BranchOnSmi(obj, if_false);
- // It can be an undetectable object.
- __ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_true);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ // Check for undetectable objects => true.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE);
+ __ b(eq, if_true);
+ // Regular expressions => 'function' (they are callable).
+ __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE);
+ __ b(eq, if_false);
+ // Check for undetectable objects => false.
+ __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r0, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, if_false);
+ // Check for JS objects => true.
+ __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, if_false);
+ __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE));
+ Split(le, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
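
TryLiteralCompare turns 'typeof <expr> == <string literal>' into direct type checks without ever materializing the typeof string. Summarizing the branches above:

//   typeof x == "number"    => smi, or map is the heap-number map
//   typeof x == "string"    => not a smi, not undetectable, instance type
//                              below FIRST_NONSTRING_TYPE
//   typeof x == "boolean"   => x is true or false
//   typeof x == "undefined" => x is undefined, or a non-smi undetectable
//   typeof x == "function"  => JS_FUNCTION_TYPE, or JS_REGEXP_TYPE (regexps
//                              are callable)
//   typeof x == "object"    => null, or a non-undetectable JS object that is
//                              not a regexp
//   any other literal       => unconditionally false
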
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
@@ -3089,26 +3042,37 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
- switch (expr->op()) {
+ switch (op) {
case Token::IN:
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_JS);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
- __ b(eq, if_true);
- __ jmp(if_false);
+ Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
VisitForValue(expr->right(), kStack);
InstanceofStub stub;
__ CallStub(&stub);
+ // The stub returns 0 for true.
__ tst(r0, r0);
- __ b(eq, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ Split(eq, if_true, if_false, fall_through);
break;
}
@@ -3116,28 +3080,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kAccumulator);
Condition cc = eq;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
- case Token::EQ: {
+ case Token::EQ:
cc = eq;
__ pop(r1);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, r1, r0, if_true, if_false, r2);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, r0, r1, if_true, if_false, r2);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = lt;
__ pop(r1);
@@ -3164,21 +3114,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- __ BranchOnNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
- __ b(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ __ orr(r2, r0, Operand(r1));
+ __ BranchOnNotSmi(r2, &slow_case);
+ __ cmp(r1, r0);
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
- __ b(cc, if_true);
- __ jmp(if_false);
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
+ Split(cc, if_true, if_false, fall_through);
}
}
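
The ShouldInlineSmiCase fast path above relies on smi tagging: a smi keeps its payload in the upper bits and a zero tag in the low bit, so orr-ing the two operands and testing kSmiTagMask rejects a non-smi in either with one instruction, after which a plain signed compare of the tagged words orders the values correctly. A sketch under the 32-bit tagging scheme:

```cpp
#include <cassert>
#include <cstdint>

// 32-bit V8 tagging, mirroring src/globals.h: smis carry their value in
// the top 31 bits with a zero low (tag) bit; heap pointers have it set.
constexpr intptr_t kSmiTagSize = 1;
constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

// Equivalent to the left shift the assembler emits (written as a multiply
// so it is well-defined for negative values in older C++ standards).
intptr_t ToSmi(int32_t value) {
  return static_cast<intptr_t>(value) * (1 << kSmiTagSize);
}

// The orr/tst trick from the generated code: one test covers both
// operands, because a set tag bit in either value survives the OR.
bool BothSmis(intptr_t a, intptr_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  intptr_t smi_a = ToSmi(7);
  intptr_t smi_b = ToSmi(-3);
  intptr_t heap_ptr = 0x1001;  // hypothetical tagged heap pointer
  assert(BothSmis(smi_a, smi_b));
  assert(!BothSmis(smi_a, heap_ptr));
  // Tagging is a uniform shift, so comparing tagged words directly gives
  // the same ordering as comparing the untagged values.
  assert(smi_b < smi_a);
  return 0;
}
```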
@@ -3188,6 +3136,38 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ cmp(r0, r1);
+ if (expr->is_strict()) {
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ __ b(eq, if_true);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, if_true);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ // It can be an undetectable object.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
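VisitCompareToNull above compiles the two flavours of null comparison: === null matches only null itself, while == null also matches undefined and undetectable objects, and never matches a smi (hence the kSmiTagMask test). A sketch of the semantics under an assumed value model:

```cpp
#include <cassert>

// Hypothetical value model; the real code checks heap roots and the
// Map::kIsUndetectable bit instead of these fields.
struct Value {
  enum Kind { kNull, kUndefined, kSmi, kObject } kind;
  bool undetectable;
};

// expr === null: only null itself matches.
bool StrictEqualsNull(const Value& v) { return v.kind == Value::kNull; }

// expr == null: null, undefined, and undetectable objects match; smis
// never do (hence the kSmiTagMask test in the generated code).
bool LooseEqualsNull(const Value& v) {
  if (v.kind == Value::kNull || v.kind == Value::kUndefined) return true;
  if (v.kind == Value::kSmi) return false;
  return v.undetectable;
}

int main() {
  Value null_v = {Value::kNull, false};
  Value undef_v = {Value::kUndefined, false};
  Value smi_v = {Value::kSmi, false};
  Value undetectable_obj = {Value::kObject, true};
  assert(StrictEqualsNull(null_v) && !StrictEqualsNull(undef_v));
  assert(LooseEqualsNull(undef_v) && LooseEqualsNull(undetectable_obj));
  assert(!LooseEqualsNull(smi_v));
  return 0;
}
```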
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, r0);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 1fd7098254..1a76db2ce3 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "assembler-arm.h"
-#include "codegen.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
@@ -414,17 +414,17 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
- Register scratch1,
- Register scratch2,
+ Register map,
+ Register scratch,
int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow);
// Get the map of the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2,
+ __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ b(nz, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -432,13 +432,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+ __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(JS_OBJECT_TYPE));
__ b(lt, slow);
}
// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -471,11 +472,15 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// scratch2 - used to hold the loaded value.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ b(ne, not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
// Check that the key (index) is within bounds.
__ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch1));
@@ -522,32 +527,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- __ mov(key, Operand(hash, LSL, kSmiTagSize));
-}
-
-
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
@@ -847,7 +826,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, r2, r3);
+ __ IndexFromHash(r3, r2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1120,16 +1099,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
+ // Check the "has fast elements" bit in the receiver's map which is
+ // now in r2.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
+ __ tst(r3, Operand(1 << Map::kHasFastElements));
+ __ b(eq, &check_pixel_array);
+
GenerateFastArrayLoad(
- masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
+ masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
// Check whether the elements is a pixel array.
// r0: key
- // r3: elements map
- // r4: elements
+ // r1: receiver
__ bind(&check_pixel_array);
+ __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
@@ -1237,7 +1223,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ Ret();
__ bind(&index_string);
- GenerateIndexFromHash(masm, key, r3);
+ __ IndexFromHash(r3, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1306,7 +1292,7 @@ static void GenerateUInt2Double(MacroAssembler* masm,
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
- __ mov(loword, Operand(0));
+ __ mov(loword, Operand(0, RelocInfo::NONE));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
@@ -1690,7 +1676,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
+ // Check that the object is in fast mode and writable.
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
@@ -1748,8 +1734,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ b(&fast);
// Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode; if it is the
- // length is always a smi.
+ // array. Check that the array is in fast mode (and writable); if it
+ // is, the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -1779,19 +1765,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
-// Convert int passed in register ival to IEE 754 single precision
-// floating point value and store it into register fval.
+// Convert and store the int passed in register ival to an IEEE 754 single
+// precision floating point value at memory location (dst + 4 * wordoffset).
// If VFP3 is available use it for conversion.
-static void ConvertIntToFloat(MacroAssembler* masm,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
+static void StoreIntAsFloat(MacroAssembler* masm,
+ Register dst,
+ Register wordoffset,
+ Register ival,
+ Register fval,
+ Register scratch1,
+ Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
+ __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
- __ vmov(fval, s0);
+ __ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move sign bit from source to destination. This works because the sign
@@ -1801,7 +1790,7 @@ static void ConvertIntToFloat(MacroAssembler* masm,
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
- __ rsb(ival, ival, Operand(0), LeaveCC, ne);
+ __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -1841,6 +1830,7 @@ static void ConvertIntToFloat(MacroAssembler* masm,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
+ __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
}
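
On hardware without VFP3, StoreIntAsFloat assembles the binary32 bit pattern by hand: sign bit, special cases for -1/0/1, then a normalized exponent and mantissa. A host-side sketch of the general normalized encoding (rounding toward zero, checked against the compiler's own conversion for exactly representable values; the special cases and the store to memory are elided):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <initializer_list>

// Build the IEEE 754 binary32 bit pattern for a 32-bit signed integer,
// rounding toward zero, in the spirit of the non-VFP path above.
uint32_t IntToFloatBits(int32_t ival) {
  uint32_t sign = (ival < 0) ? 0x80000000u : 0u;
  uint32_t mag = (ival < 0) ? 0u - static_cast<uint32_t>(ival)
                            : static_cast<uint32_t>(ival);
  if (mag == 0) return sign;  // +/-0
  // Find the position of the leading 1 bit; that is the unbiased exponent.
  int exp = 31;
  while (!(mag & (1u << exp))) exp--;
  uint32_t mantissa;
  if (exp <= 23) {
    mantissa = (mag << (23 - exp)) & 0x7fffffu;  // exact
  } else {
    mantissa = (mag >> (exp - 23)) & 0x7fffffu;  // truncated toward zero
  }
  return sign | (static_cast<uint32_t>(exp + 127) << 23) | mantissa;
}

int main() {
  // All of these are exactly representable, so truncation agrees with the
  // hardware's round-to-nearest conversion.
  for (int32_t v : {0, 1, -1, 42, -100000, 16777216}) {
    float f = static_cast<float>(v);
    uint32_t hw;
    std::memcpy(&hw, &f, sizeof(hw));
    assert(IntToFloatBits(v) == hw);
  }
  return 0;
}
```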
@@ -1935,9 +1925,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- ConvertIntToFloat(masm, r5, r6, r7, r9);
- __ str(r6, MemOperand(r3, r4, LSL, 2));
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
default:
UNREACHABLE();
@@ -1971,9 +1960,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
- __ vmov(r5, s0);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
+ __ vstr(s0, r5, 0);
} else {
// Need to perform float-to-int conversion.
// Test for NaN or infinity (both give zero).
@@ -2086,18 +2075,18 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0), LeaveCC, eq);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
- __ mov(r5, Operand(0), LeaveCC, eq);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative then result is 0.
- __ mov(r5, Operand(0), LeaveCC, mi);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
__ b(mi, &done);
// If exponent is too big then result is the minimal value.
@@ -2113,14 +2102,14 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
- __ rsb(r9, r9, Operand(0));
+ __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
- __ teq(r7, Operand(0));
- __ rsb(r5, r5, Operand(0), LeaveCC, ne);
+ __ teq(r7, Operand(0, RelocInfo::NONE));
+ __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
@@ -2217,6 +2206,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ b(ne, &miss);
// Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
__ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
__ b(ne, &miss);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 38c7c28c9d..35544312f1 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
@@ -224,7 +226,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
}
int32_t immediate = src2.immediate();
if (immediate == 0) {
- mov(dst, Operand(0), LeaveCC, cond);
+ mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
return;
}
if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
@@ -303,7 +305,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
}
tst(dst, Operand(~satval));
b(eq, &done);
- mov(dst, Operand(0), LeaveCC, mi); // 0 if negative.
+ mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
bind(&done);
} else {
@@ -513,7 +515,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::EnterExitFrame() {
// Compute the argv pointer and keep it in a callee-saved register.
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -556,16 +558,6 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Setup argc and the builtin function in callee-saved registers.
mov(r4, Operand(r0));
mov(r5, Operand(r1));
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // Use sp as base to push.
- CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
- }
-#endif
}
@@ -600,21 +592,9 @@ int MacroAssembler::ActivationFrameAlignment() {
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // This code intentionally clobbers r2 and r3.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- add(r3, fp, Operand(kOffset));
- CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
- }
-#endif
-
+void MacroAssembler::LeaveExitFrame() {
// Clear top frame.
- mov(r3, Operand(0));
+ mov(r3, Operand(0, RelocInfo::NONE));
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
str(r3, MemOperand(ip));
@@ -757,8 +737,7 @@ void MacroAssembler::InvokeFunction(Register fun,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
- MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
- add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag);
@@ -780,69 +759,11 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- str(reg, MemOperand(ip));
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- ldr(reg, MemOperand(ip));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the memory location to the stack and adjust base.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- ldr(ip, MemOperand(ip));
- str(ip, MemOperand(base, 4, NegPreIndex));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- ldr(scratch, MemOperand(base, 4, PostIndex));
- str(scratch, MemOperand(ip));
- }
- }
-}
-
+#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
- mov(r0, Operand(0));
+ mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
CEntryStub ces(1);
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
@@ -878,7 +799,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
// The frame pointer does not point to a JS frame so we save NULL
// for fp. We expect the code throwing an exception to check fp
// before dereferencing it to restore the context.
- mov(ip, Operand(0)); // To save a NULL frame pointer.
+ mov(ip, Operand(0, RelocInfo::NONE)); // To save a NULL frame pointer.
mov(r6, Operand(StackHandler::ENTRY));
ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
@@ -917,7 +838,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- cmp(scratch, Operand(0));
+ cmp(scratch, Operand(0, RelocInfo::NONE));
Check(ne, "we should not have an empty lexical context");
#endif
@@ -1338,6 +1259,21 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it do not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in register index. kArrayIndexValueMask has
+ // zeros in the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ mov(index, Operand(hash, LSL, kSmiTagSize));
+}
+
+
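The new MacroAssembler::IndexFromHash is just an unsigned bitfield extract (Ubfx) followed by a left shift that smi-tags the result. A sketch with illustrative constants — the real values of kHashShift, kArrayIndexValueBits, and kSmiTagSize live in the V8 headers and should be treated as assumptions here:

```cpp
#include <cassert>
#include <cstdint>

// Assumed layout constants for illustration only; see String::kHashShift,
// String::kArrayIndexValueBits, and kSmiTagSize in the V8 sources.
constexpr int kHashShift = 2;
constexpr int kArrayIndexValueBits = 24;
constexpr int kSmiTagSize = 1;

// Equivalent of Ubfx(hash, hash, kHashShift, kArrayIndexValueBits)
// followed by mov(index, Operand(hash, LSL, kSmiTagSize)).
uint32_t IndexFromHash(uint32_t hash_field) {
  uint32_t index = (hash_field >> kHashShift) &
                   ((1u << kArrayIndexValueBits) - 1);  // bitfield extract
  return index << kSmiTagSize;                          // smi-tag the result
}

int main() {
  // A hash field caching array index 7 in the assumed layout.
  uint32_t hash_field = 7u << kHashShift;
  assert(IndexFromHash(hash_field) == (7u << kSmiTagSize));
  return 0;
}
```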
void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
Register outHighReg,
Register outLowReg) {
@@ -1399,6 +1335,104 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
}
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
+// 32-bit signed integer range.
+void MacroAssembler::ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ Label *not_int32) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ sub(scratch, source, Operand(kHeapObjectTag));
+ vldr(d0, scratch, HeapNumber::kValueOffset);
+ vcvt_s32_f64(s0, d0);
+ vmov(dest, s0);
+ // The signed vcvt instruction will saturate to the minimum (0x80000000) or
+ // maximum (0x7fffffff) signed 32-bit integer when the double is out of
+ // range. When subtracting one, the minimum signed integer becomes the
+ // maximum signed integer.
+ sub(scratch, dest, Operand(1));
+ cmp(scratch, Operand(LONG_MAX - 1));
+ // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
+ b(ge, not_int32);
+ } else {
+ // This code is faster for doubles that are in the ranges -0x7fffffff to
+ // -0x40000000 or 0x40000000 to 0x7fffffff. This almost corresponds to
+ // the range of signed int32 values that are not Smis. Jumps to the label
+ // 'not_int32' if the double isn't in the range -0x80000000.0 to
+ // 0x80000000.0 (excluding the endpoints).
+ Label right_exponent, done;
+ // Get exponent word.
+ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ Ubfx(scratch2,
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ mov(dest, Operand(0, RelocInfo::NONE));
+ // Check whether the exponent matches a 32-bit signed int that is not a Smi.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+ // the exponent that we are fastest at and also the highest exponent we can
+ // handle here.
+ const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+ // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+ // split it up to avoid a constant pool entry. You can't do that in general
+ // for cmp because of the overflow flag, but we know the exponent is in the
+ // range 0-2047 so there is no overflow.
+ int fudge_factor = 0x400;
+ sub(scratch2, scratch2, Operand(fudge_factor));
+ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ b(eq, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ b(gt, not_int32);
+
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
+ // Dest already has a Smi zero.
+ b(lt, &done);
+
+ // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
+ // get how much to shift down.
+ rsb(dest, scratch2, Operand(30));
+
+ bind(&right_exponent);
+ // Get the top bits of the mantissa.
+ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the last 10 bits.
+ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ bind(&done);
+ }
+}
+
+
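In the VFP3 path of ConvertToInt32, vcvt saturates out-of-range doubles to INT32_MIN or INT32_MAX, and the sub/cmp/b(ge) sequence detects both rails with a single compare: subtracting one maps INT32_MAX to INT32_MAX - 1 and wraps INT32_MIN around to INT32_MAX. The check is deliberately conservative — a genuine INT32_MAX or INT32_MIN result also branches to not_int32 and takes the slow path. A sketch of the trick, assuming the usual two's complement wraparound:

```cpp
#include <cassert>
#include <cstdint>

// After a saturating double->int conversion, detect whether the result hit
// either rail. Subtracting 1 maps INT32_MAX to INT32_MAX - 1 and wraps
// INT32_MIN to INT32_MAX, so one signed compare covers both cases --
// the same idea as the sub/cmp/b(ge) sequence above.
bool MaybeSaturated(int32_t dest) {
  int32_t probe = static_cast<int32_t>(static_cast<uint32_t>(dest) - 1u);
  return probe >= INT32_MAX - 1;
}

int main() {
  assert(MaybeSaturated(INT32_MAX));
  assert(MaybeSaturated(INT32_MIN));
  assert(!MaybeSaturated(0));
  assert(!MaybeSaturated(-12345));
  assert(!MaybeSaturated(INT32_MAX - 1));  // legitimate in-range value
  return 0;
}
```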
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -1490,30 +1524,22 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(r1));
-
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
// Load the builtins object into target register.
ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
-
// Load the JavaScript builtin function from the builtins object.
- ldr(r1, FieldMemOperand(target,
+ ldr(target, FieldMemOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(r1));
+ GetBuiltinFunction(r1, id);
// Load the code entry point from the builtins object.
- ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfCodeWithId(id)));
- if (FLAG_debug_code) {
- // Make sure the code objects in the builtins object and in the
- // builtin function are the same.
- push(r1);
- ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
- cmp(r1, target);
- Assert(eq, "Builtin code object changed");
- pop(r1);
- }
- add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}
@@ -1567,6 +1593,25 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
}
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (FLAG_debug_code) {
+ ASSERT(!elements.is(ip));
+ Label ok;
+ push(elements);
+ ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ cmp(elements, ip);
+ b(eq, &ok);
+ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ cmp(elements, ip);
+ b(eq, &ok);
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ pop(elements);
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
@@ -1773,7 +1818,7 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
- mov(zeros, Operand(0));
+ mov(zeros, Operand(0, RelocInfo::NONE));
Move(scratch, source);
// Top 16.
tst(scratch, Operand(0xffff0000));
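
The pre-ARMv5 fallback continues past this excerpt, halving the probe width from 16 bits down to 1. A sketch of the complete binary-search scheme, reproducing the documented quirk that an input of zero yields 31 rather than 32:

```cpp
#include <cassert>
#include <cstdint>

// Count leading zeros in the style of the pre-ARMv5 fallback: test the top
// 16 bits, then 8, 4, 2, 1, shifting the value up whenever the tested half
// is empty. Like the ARM routine, this returns 31 (not 32) for zero.
int CountLeadingZeros(uint32_t x) {
  int zeros = 0;
  if (!(x & 0xffff0000u)) { zeros += 16; x <<= 16; }  // top 16
  if (!(x & 0xff000000u)) { zeros += 8;  x <<= 8;  }  // top 8
  if (!(x & 0xf0000000u)) { zeros += 4;  x <<= 4;  }  // top 4
  if (!(x & 0xc0000000u)) { zeros += 2;  x <<= 2;  }  // top 2
  if (!(x & 0x80000000u)) { zeros += 1; }             // top 1
  return zeros;
}

int main() {
  assert(CountLeadingZeros(1u) == 31);
  assert(CountLeadingZeros(0x80000000u) == 0);
  assert(CountLeadingZeros(0x00010000u) == 15);
  assert(CountLeadingZeros(0u) == 31);  // the documented wrong answer for 0
  return 0;
}
```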
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 836ed74994..febd87e8a6 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -250,14 +250,14 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either normal or debug mode.
+ // Enter exit frame.
// Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and the builtin function to call in r5.
- void EnterExitFrame(ExitFrame::Mode mode);
+ void EnterExitFrame();
// Leave the current exit frame. Expects the return value in r0.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ void LeaveExitFrame();
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -294,12 +294,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void CopyRegistersFromMemoryToStack(Register base, RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -475,6 +469,12 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
@@ -504,6 +504,15 @@ class MacroAssembler: public Assembler {
Register scratch1,
SwVfpRegister scratch2);
+ // Convert the HeapNumber pointed to by source to a 32-bit signed integer
+ // in dest. If the HeapNumber does not fit into a 32-bit signed integer,
+ // branch to the not_int32 label.
+ void ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ Label *not_int32);
+
+ // Count leading zeros in a 32-bit word. On ARM5 and later it uses the clz
+ // instruction. On pre-ARM5 hardware this routine gives the wrong answer
+ // for 0 (31 instead of 32). Source and scratch can be the same, in which case
@@ -576,6 +585,9 @@ class MacroAssembler: public Assembler {
// setup the function in r1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
Handle<Object> CodeObject() { return code_object_; }
@@ -597,6 +609,7 @@ class MacroAssembler: public Assembler {
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index c67c7aacaa..8f45886d92 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -31,12 +31,10 @@
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
namespace v8 {
@@ -191,7 +189,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, &not_at_start);
// If we did, are we still at the start of the input?
@@ -206,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -366,7 +364,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ CallCFunction(function, argument_count);
// Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, on_no_match);
// On success, increment position by length of capture.
__ add(current_input_offset(), current_input_offset(), Operand(r4));
@@ -636,7 +634,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &exit_label_);
@@ -663,7 +661,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// string, and store that value in a local variable.
__ tst(r1, Operand(r1));
__ mov(r1, Operand(1), LeaveCC, eq);
- __ mov(r1, Operand(0), LeaveCC, ne);
+ __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -686,7 +684,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Load previous char as initial value of current character register.
Label at_start;
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
@@ -753,7 +751,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
SafeCallTarget(&check_preempt_label_);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &exit_label_);
@@ -780,7 +778,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), r0);
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 2c0a8d84d4..93a74d7ca4 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -242,22 +242,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
- const char* GetName() { return "RegExpCEntryStub"; }
-};
-
#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index c4cc8d46cb..64262b2b81 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -727,6 +727,10 @@ void Simulator::set_register(int reg, int32_t value) {
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
ASSERT((reg >= 0) && (reg < num_registers));
+ // Stupid code added to avoid bug in GCC.
+ // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+ if (reg >= num_registers) return 0;
+ // End stupid code.
return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
}
@@ -1378,7 +1382,9 @@ void Simulator::HandleRList(Instr* instr, bool load) {
}
case 3: {
// Print("ib");
- UNIMPLEMENTED();
+ start_address = rn_val + 4;
+ end_address = rn_val + (num_regs * 4);
+ rn_val = end_address;
break;
}
default: {
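
Case 3 fills in the previously UNIMPLEMENTED ib (increment-before) block-transfer mode: the first slot is rn + 4 and the last is rn + 4n. A sketch of all four LDM/STM address ranges — only the ib case is visible in this hunk, so the other three follow the standard ARM definitions and should be read as an assumption:

```cpp
#include <cassert>
#include <cstdint>

// Start/end addresses touched by an ARM LDM/STM for each addressing mode,
// in the shape of the simulator's HandleRList cases (0=da, 1=ia, 2=db, 3=ib).
struct Range { int32_t start, end; };

Range BlockTransferRange(int mode, int32_t rn_val, int num_regs) {
  switch (mode) {
    case 0:  return {rn_val - 4 * num_regs + 4, rn_val};      // da
    case 1:  return {rn_val, rn_val + 4 * num_regs - 4};      // ia
    case 2:  return {rn_val - 4 * num_regs, rn_val - 4};      // db
    default: return {rn_val + 4, rn_val + 4 * num_regs};      // ib
  }
}

int main() {
  // The newly implemented increment-before case: first slot is rn + 4.
  Range ib = BlockTransferRange(3, /*rn_val=*/0x1000, /*num_regs=*/3);
  assert(ib.start == 0x1004 && ib.end == 0x100c);
  return 0;
}
```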
@@ -2275,13 +2281,6 @@ void Simulator::DecodeUnconditional(Instr* instr) {
}
-// Depending on value of last_bit flag glue register code from vm and m values
-// (where m is expected to be a single bit).
-static int GlueRegCode(bool last_bit, int vm, int m) {
- return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
-}
-
-
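The removed GlueRegCode helper shows how a VFP register number is split across the instruction: a 4-bit field (vm/vd/vn) plus a 1-bit extension (M/D/N). For single-precision registers the extension is the low bit, giving S0-S31; for the double-precision registers handled here it was glued on top. The new VFPMRegCode/VFPDRegCode/VFPNRegCode accessors make the precision explicit at each use. A sketch of the encoding the old helper implemented:

```cpp
#include <cassert>

enum VFPRegPrecision { kSinglePrecision, kDoublePrecision };

// How the 4-bit register field and its 1-bit extension combine, per the
// removed GlueRegCode helper: for S registers the extension is the low
// bit; for the D registers handled here it sits above the 4-bit field.
int VFPRegCode(VFPRegPrecision precision, int vx, int x) {
  return (precision == kSinglePrecision) ? ((vx << 1) | x)
                                         : ((x << 4) | vx);
}

int main() {
  assert(VFPRegCode(kSinglePrecision, 5, 1) == 11);  // S11
  assert(VFPRegCode(kSinglePrecision, 5, 0) == 10);  // S10
  assert(VFPRegCode(kDoublePrecision, 5, 0) == 5);   // D5
  return 0;
}
```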
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFPv instructions are currently supported.
// vmov :Sn = Rt
@@ -2299,9 +2298,10 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
- int vm = instr->VmField();
- int vd = instr->VdField();
- int vn = instr->VnField();
+ // Obtain double precision register codes.
+ int vm = instr->VFPMRegCode(kDoublePrecision);
+ int vd = instr->VFPDRegCode(kDoublePrecision);
+ int vn = instr->VFPNRegCode(kDoublePrecision);
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
@@ -2309,9 +2309,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
// vmov register to register.
if (instr->SzField() == 0x1) {
- set_d_register_from_double(vd, get_double_from_d_register(vm));
+ int m = instr->VFPMRegCode(kDoublePrecision);
+ int d = instr->VFPDRegCode(kDoublePrecision);
+ set_d_register_from_double(d, get_double_from_d_register(m));
} else {
- set_s_register_from_float(vd, get_float_from_s_register(vm));
+ int m = instr->VFPMRegCode(kSinglePrecision);
+ int d = instr->VFPDRegCode(kSinglePrecision);
+ set_s_register_from_float(d, get_float_from_s_register(m));
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -2404,7 +2408,7 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
(instr->VAField() == 0x0));
int t = instr->RtField();
- int n = GlueRegCode(true, instr->VnField(), instr->NField());
+ int n = instr->VFPNRegCode(kSinglePrecision);
bool to_arm_register = (instr->VLField() == 0x1);
if (to_arm_register) {
@@ -2421,22 +2425,25 @@ void Simulator::DecodeVCMP(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1));
-
// Comparison.
- bool dp_operation = (instr->SzField() == 1);
+
+ VFPRegPrecision precision = kSinglePrecision;
+ if (instr->SzField() == 1) {
+ precision = kDoublePrecision;
+ }
if (instr->Bit(7) != 0) {
// Raising exceptions for quiet NaNs is not supported.
UNIMPLEMENTED(); // Not used by V8.
}
- int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
+ int d = instr->VFPDRegCode(precision);
int m = 0;
if (instr->Opc2Field() == 0x4) {
- m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+ m = instr->VFPMRegCode(precision);
}
- if (dp_operation) {
+ if (precision == kDoublePrecision) {
double dd_value = get_double_from_d_register(d);
double dm_value = 0.0;
if (instr->Opc2Field() == 0x4) {
@@ -2454,11 +2461,17 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
- bool double_to_single = (instr->SzField() == 1);
- int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
- int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
+ VFPRegPrecision dst_precision = kDoublePrecision;
+ VFPRegPrecision src_precision = kSinglePrecision;
+ if (instr->SzField() == 1) {
+ dst_precision = kSinglePrecision;
+ src_precision = kDoublePrecision;
+ }
+
+ int dst = instr->VFPDRegCode(dst_precision);
+ int src = instr->VFPMRegCode(src_precision);
- if (double_to_single) {
+ if (dst_precision == kSinglePrecision) {
double val = get_double_from_d_register(src);
set_s_register_from_float(dst, static_cast<float>(val));
} else {
@@ -2474,13 +2487,13 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
(((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
// Conversion between floating-point and integer.
- int vd = instr->VdField();
- int d = instr->DField();
- int vm = instr->VmField();
- int m = instr->MField();
-
bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzField() == 1);
+
+ VFPRegPrecision src_precision = kSinglePrecision;
+ if (instr->SzField() == 1) {
+ src_precision = kDoublePrecision;
+ }
+
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
if (instr->Bit(7) != 1) {
@@ -2488,10 +2501,10 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
UNIMPLEMENTED(); // Not used by V8.
}
- int dst = GlueRegCode(true, vd, d);
- int src = GlueRegCode(!dp_operation, vm, m);
+ int dst = instr->VFPDRegCode(kSinglePrecision);
+ int src = instr->VFPMRegCode(src_precision);
- if (dp_operation) {
+ if (src_precision == kDoublePrecision) {
double val = get_double_from_d_register(src);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
@@ -2509,12 +2522,12 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
- int dst = GlueRegCode(!dp_operation, vd, d);
- int src = GlueRegCode(true, vm, m);
+ int dst = instr->VFPDRegCode(src_precision);
+ int src = instr->VFPMRegCode(kSinglePrecision);
int val = get_sinteger_from_s_register(src);
- if (dp_operation) {
+ if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
set_d_register_from_double(dst,
static_cast<double>((uint32_t)val));
@@ -2545,9 +2558,11 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
- case 0xC: { // Load and store float to memory.
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store single precision float to memory.
int rn = instr->RnField();
- int vd = instr->VdField();
+ int vd = instr->VFPDRegCode(kSinglePrecision);
int offset = instr->Immed8Field();
if (!instr->HasU()) {
offset = -offset;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index fa90ca7d11..344cb6fb90 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1297,11 +1297,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
@@ -1349,11 +1344,6 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
@@ -1373,8 +1363,68 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
+ // ----------- S t a t e -------------
+ // -- r2 : function name
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r0);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+ r1, r3, r4, name, &miss);
+
+ Register receiver = r1;
+ Register index = r4;
+ Register scratch = r3;
+ Register result = r0;
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ ICRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kNanValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
}
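
The state comment at the top of this stub pins down the calling convention the loads depend on: arguments are pushed left to right, so the receiver sits deepest at sp[argc * 4] and the zero-based argument n at sp[(argc - n - 1) * 4]. A sketch of the offsets:

```cpp
#include <cassert>

// Offsets (in bytes from sp) used by the ARM call stubs above: the
// receiver is deepest and the last argument is on top of the stack.
// kPointerSize is 4 on 32-bit ARM.
constexpr int kPointerSize = 4;

int ReceiverOffset(int argc) { return argc * kPointerSize; }
int ArgOffset(int argc, int n) {  // n is zero-based
  return (argc - n - 1) * kPointerSize;
}

int main() {
  // For "str".charCodeAt(i): argc == 1, receiver at sp[4], index at sp[0],
  // matching the MemOperand(sp, ...) loads in CompileStringCharCodeAtCall.
  assert(ReceiverOffset(1) == 4);
  assert(ArgOffset(1, 0) == 0);
  return 0;
}
```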
@@ -1383,8 +1433,71 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
+ // ----------- S t a t e -------------
+ // -- r2 : function name
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r0);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+ r1, r3, r4, name, &miss);
+
+ Register receiver = r0;
+ Register index = r4;
+ Register scratch1 = r1;
+ Register scratch2 = r3;
+ Register result = r0;
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
}
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index f3c0697b99..e12df64143 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -566,13 +566,6 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
- // SpiderMonkey and JSC return undefined in the case where no
- // arguments are given instead of using the implicit undefined
- // arguments. This does not follow ECMA-262, but we do the same for
- // compatibility.
- // TraceMonkey follows ECMA-262 though.
- if (num_arguments == 0) return;
-
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
@@ -953,7 +946,8 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) {
- var length = this.length;
+ var length = TO_UINT32(this.length);
+ if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
index = 0;
} else {
@@ -963,13 +957,13 @@ function ArrayIndexOf(element, index) {
// If index is still negative, search the entire array.
if (index < 0) index = 0;
}
+ // Lookup through the array.
if (!IS_UNDEFINED(element)) {
for (var i = index; i < length; i++) {
if (this[i] === element) return i;
}
return -1;
}
- // Lookup through the array.
for (var i = index; i < length; i++) {
if (IS_UNDEFINED(this[i]) && i in this) {
return i;
@@ -980,7 +974,8 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
- var length = this.length;
+ var length = TO_UINT32(this.length);
+ if (length == 0) return -1;
if (%_ArgumentsLength() < 2) {
index = length - 1;
} else {
diff --git a/deps/v8/src/ast-inl.h b/deps/v8/src/ast-inl.h
index 717f68d063..f0a25c171f 100644
--- a/deps/v8/src/ast-inl.h
+++ b/deps/v8/src/ast-inl.h
@@ -64,8 +64,7 @@ ForStatement::ForStatement(ZoneStringList* labels)
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
- loop_variable_(NULL),
- peel_this_loop_(false) {
+ loop_variable_(NULL) {
}
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 92df990063..9ff1be73bc 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -28,7 +28,6 @@
#include "v8.h"
#include "ast.h"
-#include "data-flow.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
@@ -78,18 +77,17 @@ VariableProxy::VariableProxy(Handle<String> name,
var_(NULL),
is_this_(is_this),
inside_with_(inside_with),
- is_trivial_(false),
- reaching_definitions_(NULL),
- is_primitive_(false) {
+ is_trivial_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
VariableProxy::VariableProxy(bool is_this)
- : is_this_(is_this),
- reaching_definitions_(NULL),
- is_primitive_(false) {
+ : var_(NULL),
+ is_this_(is_this),
+ inside_with_(false),
+ is_trivial_(false) {
}
@@ -237,6 +235,59 @@ bool Expression::GuaranteedSmiResult() {
return false;
}
+
+void Expression::CopyAnalysisResultsFrom(Expression* other) {
+ bitfields_ = other->bitfields_;
+ type_ = other->type_;
+}
+
+
+bool UnaryOperation::ResultOverwriteAllowed() {
+ switch (op_) {
+ case Token::BIT_NOT:
+ case Token::SUB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+bool BinaryOperation::ResultOverwriteAllowed() {
+ switch (op_) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ return false;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ return true;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+}
+
+
+BinaryOperation::BinaryOperation(Assignment* assignment) {
+ ASSERT(assignment->is_compound());
+ op_ = assignment->binary_op();
+ left_ = assignment->target();
+ right_ = assignment->value();
+ pos_ = assignment->position();
+ CopyAnalysisResultsFrom(assignment);
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@@ -575,218 +626,6 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
}
-// IsPrimitive implementation. IsPrimitive is true if the value of an
-// expression is known at compile-time to be any JS type other than Object
-// (e.g, it is Undefined, Null, Boolean, String, or Number).
-
-// The following expression types are never primitive because they express
-// Object values.
-bool FunctionLiteral::IsPrimitive() { return false; }
-bool SharedFunctionInfoLiteral::IsPrimitive() { return false; }
-bool RegExpLiteral::IsPrimitive() { return false; }
-bool ObjectLiteral::IsPrimitive() { return false; }
-bool ArrayLiteral::IsPrimitive() { return false; }
-bool CatchExtensionObject::IsPrimitive() { return false; }
-bool CallNew::IsPrimitive() { return false; }
-bool ThisFunction::IsPrimitive() { return false; }
-
-
-// The following expression types are not always primitive because we do not
-// have enough information to conclude that they are.
-bool Property::IsPrimitive() { return false; }
-bool Call::IsPrimitive() { return false; }
-bool CallRuntime::IsPrimitive() { return false; }
-
-
-// A variable use is not primitive unless the primitive-type analysis
-// determines otherwise.
-bool VariableProxy::IsPrimitive() {
- ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated()));
- return is_primitive_;
-}
-
-// The value of a conditional is the value of one of the alternatives. It's
-// always primitive if both alternatives are always primitive.
-bool Conditional::IsPrimitive() {
- return then_expression()->IsPrimitive() && else_expression()->IsPrimitive();
-}
-
-
-// A literal is primitive when it is not a JSObject.
-bool Literal::IsPrimitive() { return !handle()->IsJSObject(); }
-
-
-// The value of an assignment is the value of its right-hand side.
-bool Assignment::IsPrimitive() {
- switch (op()) {
- case Token::INIT_VAR:
- case Token::INIT_CONST:
- case Token::ASSIGN:
- return value()->IsPrimitive();
-
- default:
- // {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=}
- // Arithmetic operations are always primitive. They express Numbers
- // with the exception of +, which expresses a Number or a String.
- return true;
- }
-}
-
-
-// Throw does not express a value, so it's trivially always primitive.
-bool Throw::IsPrimitive() { return true; }
-
-
-// Unary operations always express primitive values. delete and ! express
-// Booleans, void Undefined, typeof String, +, -, and ~ Numbers.
-bool UnaryOperation::IsPrimitive() { return true; }
-
-
-// Count operations (pre- and post-fix increment and decrement) always
-// express primitive values (Numbers). See ECMA-262-3, 11.3.1, 11.3.2,
-// 11.4.4, and 11.4.5.
-bool CountOperation::IsPrimitive() { return true; }
-
-
-// Binary operations depend on the operator.
-bool BinaryOperation::IsPrimitive() {
- switch (op()) {
- case Token::COMMA:
- // Value is the value of the right subexpression.
- return right()->IsPrimitive();
-
- case Token::OR:
- case Token::AND:
- // Value is the value one of the subexpressions.
- return left()->IsPrimitive() && right()->IsPrimitive();
-
- default:
- // {|, ^, &, <<, >>, >>>, +, -, *, /, %}
- // Arithmetic operations are always primitive. They express Numbers
- // with the exception of +, which expresses a Number or a String.
- return true;
- }
-}
-
-
-// Compare operations always express Boolean values.
-bool CompareOperation::IsPrimitive() { return true; }
-
-
-// Overridden IsCritical member functions. IsCritical is true for AST nodes
-// whose evaluation is absolutely required (they are never dead) because
-// they are externally visible.
-
-// References to global variables or lookup slots are critical because they
-// may have getters. All others, including parameters rewritten to explicit
-// property references, are not critical.
-bool VariableProxy::IsCritical() {
- Variable* var = AsVariable();
- return var != NULL &&
- (var->slot() == NULL || var->slot()->type() == Slot::LOOKUP);
-}
-
-
-// Literals are never critical.
-bool Literal::IsCritical() { return false; }
-
-
-// Property assignments and throwing of reference errors are always
-// critical. Assignments to escaping variables are also critical. In
-// addition the operation of compound assignments is critical if either of
-// its operands is non-primitive (the arithmetic operations all use one of
-// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands).
-// In this case, we mark the entire AST node as critical because there is
-// no binary operation node to mark.
-bool Assignment::IsCritical() {
- Variable* var = AssignedVariable();
- return var == NULL ||
- !var->IsStackAllocated() ||
- (is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive()));
-}
-
-
-// Property references are always critical, because they may have getters.
-bool Property::IsCritical() { return true; }
-
-
-// Calls are always critical.
-bool Call::IsCritical() { return true; }
-
-
-// +,- use ToNumber on the value of their operand.
-bool UnaryOperation::IsCritical() {
- ASSERT(op() == Token::ADD || op() == Token::SUB);
- return !expression()->IsPrimitive();
-}
-
-
-// Count operations targeting properties and reference errors are always
-// critical. Count operations on escaping variables are critical. Count
-// operations targeting non-primitives are also critical because they use
-// ToNumber.
-bool CountOperation::IsCritical() {
- Variable* var = AssignedVariable();
- return var == NULL ||
- !var->IsStackAllocated() ||
- !expression()->IsPrimitive();
-}
-
-
-// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or
-// ToUint32 on each of their operands.
-bool BinaryOperation::IsCritical() {
- ASSERT(op() != Token::COMMA);
- ASSERT(op() != Token::OR);
- ASSERT(op() != Token::AND);
- return !left()->IsPrimitive() || !right()->IsPrimitive();
-}
-
-
-// <, >, <=, and >= all use ToPrimitive on both their operands.
-bool CompareOperation::IsCritical() {
- ASSERT(op() != Token::EQ);
- ASSERT(op() != Token::NE);
- ASSERT(op() != Token::EQ_STRICT);
- ASSERT(op() != Token::NE_STRICT);
- ASSERT(op() != Token::INSTANCEOF);
- ASSERT(op() != Token::IN);
- return !left()->IsPrimitive() || !right()->IsPrimitive();
-}
-
-
-// Implementation of a copy visitor. The visitor creates a deep copy
-// of AST nodes. Nodes that do not require a deep copy are copied
-// with the default copy constructor.
-
-AstNode::AstNode(AstNode* other) : num_(kNoNumber) {
- // AST node number should be unique. Assert that we only copy AstNodes
- // before node numbers are assigned.
- ASSERT(other->num_ == kNoNumber);
-}
-
-
-Statement::Statement(Statement* other)
- : AstNode(other), statement_pos_(other->statement_pos_) {}
-
-
-Expression::Expression(Expression* other)
- : AstNode(other),
- bitfields_(other->bitfields_),
- type_(other->type_) {}
-
-
-BreakableStatement::BreakableStatement(BreakableStatement* other)
- : Statement(other), labels_(other->labels_), type_(other->type_) {}
-
-
-Block::Block(Block* other, ZoneList<Statement*>* statements)
- : BreakableStatement(other),
- statements_(statements->length()),
- is_initializer_block_(other->is_initializer_block_) {
- statements_.AddAll(*statements);
-}
-
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
@@ -795,358 +634,8 @@ WhileStatement::WhileStatement(ZoneStringList* labels)
}
-ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
- Expression* expression)
- : Statement(other), expression_(expression) {}
-
-
-IfStatement::IfStatement(IfStatement* other,
- Expression* condition,
- Statement* then_statement,
- Statement* else_statement)
- : Statement(other),
- condition_(condition),
- then_statement_(then_statement),
- else_statement_(else_statement) {}
-
-
-EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {}
-
-
-IterationStatement::IterationStatement(IterationStatement* other,
- Statement* body)
- : BreakableStatement(other), body_(body) {}
-
-
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) {
}
-
-ForStatement::ForStatement(ForStatement* other,
- Statement* init,
- Expression* cond,
- Statement* next,
- Statement* body)
- : IterationStatement(other, body),
- init_(init),
- cond_(cond),
- next_(next),
- may_have_function_literal_(other->may_have_function_literal_),
- loop_variable_(other->loop_variable_),
- peel_this_loop_(other->peel_this_loop_) {}
-
-
-Assignment::Assignment(Assignment* other,
- Expression* target,
- Expression* value)
- : Expression(other),
- op_(other->op_),
- target_(target),
- value_(value),
- pos_(other->pos_),
- block_start_(other->block_start_),
- block_end_(other->block_end_) {}
-
-
-Property::Property(Property* other, Expression* obj, Expression* key)
- : Expression(other),
- obj_(obj),
- key_(key),
- pos_(other->pos_),
- type_(other->type_) {}
-
-
-Call::Call(Call* other,
- Expression* expression,
- ZoneList<Expression*>* arguments)
- : Expression(other),
- expression_(expression),
- arguments_(arguments),
- pos_(other->pos_) {}
-
-
-UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
- : Expression(other), op_(other->op_), expression_(expression) {}
-
-
-BinaryOperation::BinaryOperation(Expression* other,
- Token::Value op,
- Expression* left,
- Expression* right)
- : Expression(other), op_(op), left_(left), right_(right) {}
-
-
-CountOperation::CountOperation(CountOperation* other, Expression* expression)
- : Expression(other),
- is_prefix_(other->is_prefix_),
- op_(other->op_),
- expression_(expression) {}
-
-
-CompareOperation::CompareOperation(CompareOperation* other,
- Expression* left,
- Expression* right)
- : Expression(other),
- op_(other->op_),
- left_(left),
- right_(right) {}
-
-
-Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) {
- expr_ = NULL;
- if (expr != NULL) Visit(expr);
- return expr_;
-}
-
-
-Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) {
- stmt_ = NULL;
- if (stmt != NULL) Visit(stmt);
- return stmt_;
-}
-
-
-ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList(
- ZoneList<Expression*>* expressions) {
- ZoneList<Expression*>* copy =
- new ZoneList<Expression*>(expressions->length());
- for (int i = 0; i < expressions->length(); i++) {
- copy->Add(DeepCopyExpr(expressions->at(i)));
- }
- return copy;
-}
-
-
-ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList(
- ZoneList<Statement*>* statements) {
- ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length());
- for (int i = 0; i < statements->length(); i++) {
- copy->Add(DeepCopyStmt(statements->at(i)));
- }
- return copy;
-}
-
-
-void CopyAstVisitor::VisitBlock(Block* stmt) {
- stmt_ = new Block(stmt,
- DeepCopyStmtList(stmt->statements()));
-}
-
-
-void CopyAstVisitor::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression()));
-}
-
-
-void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) {
- stmt_ = new EmptyStatement(stmt);
-}
-
-
-void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) {
- stmt_ = new IfStatement(stmt,
- DeepCopyExpr(stmt->condition()),
- DeepCopyStmt(stmt->then_statement()),
- DeepCopyStmt(stmt->else_statement()));
-}
-
-
-void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitForStatement(ForStatement* stmt) {
- stmt_ = new ForStatement(stmt,
- DeepCopyStmt(stmt->init()),
- DeepCopyExpr(stmt->cond()),
- DeepCopyStmt(stmt->next()),
- DeepCopyStmt(stmt->body()));
-}
-
-
-void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitConditional(Conditional* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) {
- expr_ = new VariableProxy(*expr);
-}
-
-
-void CopyAstVisitor::VisitLiteral(Literal* expr) {
- expr_ = new Literal(*expr);
-}
-
-
-void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitAssignment(Assignment* expr) {
- expr_ = new Assignment(expr,
- DeepCopyExpr(expr->target()),
- DeepCopyExpr(expr->value()));
-}
-
-
-void CopyAstVisitor::VisitThrow(Throw* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitProperty(Property* expr) {
- expr_ = new Property(expr,
- DeepCopyExpr(expr->obj()),
- DeepCopyExpr(expr->key()));
-}
-
-
-void CopyAstVisitor::VisitCall(Call* expr) {
- expr_ = new Call(expr,
- DeepCopyExpr(expr->expression()),
- DeepCopyExprList(expr->arguments()));
-}
-
-
-void CopyAstVisitor::VisitCallNew(CallNew* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) {
- expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression()));
-}
-
-
-void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
- expr_ = new CountOperation(expr,
- DeepCopyExpr(expr->expression()));
-}
-
-
-void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
- expr_ = new BinaryOperation(expr,
- expr->op(),
- DeepCopyExpr(expr->left()),
- DeepCopyExpr(expr->right()));
-}
-
-
-void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) {
- expr_ = new CompareOperation(expr,
- DeepCopyExpr(expr->left()),
- DeepCopyExpr(expr->right()));
-}
-
-
-void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
} } // namespace v8::internal
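The CopyAstVisitor deleted above relies on a common visitor idiom: since the Visit* methods return void, the copy of the most recently visited node is parked in a member slot (expr_ or stmt_) and read back by DeepCopyExpr/DeepCopyStmt. A minimal standalone sketch of that result-slot idiom follows; the Expr/Literal/BinaryOp hierarchy is invented for illustration and is not V8 code.

#include <cstdio>

class CopyVisitor;

class Expr {
 public:
  virtual ~Expr() {}
  virtual void Accept(CopyVisitor* v) = 0;
};

class Literal;
class BinaryOp;

class CopyVisitor {
 public:
  CopyVisitor() : result_(0) {}
  Expr* DeepCopy(Expr* expr);        // mirrors DeepCopyExpr
  void VisitLiteral(Literal* lit);
  void VisitBinaryOp(BinaryOp* op);
 private:
  Expr* result_;  // holds the copy produced by the last visit
};

class Literal : public Expr {
 public:
  explicit Literal(int value) : value_(value) {}
  int value() const { return value_; }
  virtual void Accept(CopyVisitor* v) { v->VisitLiteral(this); }
 private:
  int value_;
};

class BinaryOp : public Expr {
 public:
  BinaryOp(char op, Expr* left, Expr* right)
      : op_(op), left_(left), right_(right) {}
  char op() const { return op_; }
  Expr* left() const { return left_; }
  Expr* right() const { return right_; }
  virtual void Accept(CopyVisitor* v) { v->VisitBinaryOp(this); }
 private:
  char op_;
  Expr* left_;
  Expr* right_;
};

Expr* CopyVisitor::DeepCopy(Expr* expr) {
  result_ = 0;                        // clear the slot before each visit
  if (expr != 0) expr->Accept(this);
  return result_;
}

void CopyVisitor::VisitLiteral(Literal* lit) {
  result_ = new Literal(lit->value());
}

void CopyVisitor::VisitBinaryOp(BinaryOp* op) {
  // Copy the children first, then rebuild the parent around the copies.
  Expr* left = DeepCopy(op->left());
  Expr* right = DeepCopy(op->right());
  result_ = new BinaryOp(op->op(), left, right);
}

int main() {
  Expr* tree = new BinaryOp('+', new Literal(1), new Literal(2));
  CopyVisitor copier;
  Expr* clone = copier.DeepCopy(tree);
  std::printf("distinct copies: %p %p\n",
              static_cast<void*>(tree), static_cast<void*>(clone));
  return 0;
}

Copying children before constructing the parent keeps the recursion simple; the price is a stateful visitor, which is why DeepCopy clears result_ on every call.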
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index b9a7a3dd7a..9fcf25672f 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -89,9 +89,11 @@ namespace internal {
V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
+ V(IncrementOperation) \
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
+ V(CompareToNull) \
V(ThisFunction)
#define AST_NODE_LIST(V) \
@@ -118,12 +120,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
class AstNode: public ZoneObject {
public:
- static const int kNoNumber = -1;
-
- AstNode() : num_(kNoNumber) {}
-
- explicit AstNode(AstNode* other);
-
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@@ -151,20 +147,6 @@ class AstNode: public ZoneObject {
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
virtual CompareOperation* AsCompareOperation() { return NULL; }
-
- // True if the AST node is critical (its execution is needed or externally
- // visible in some way).
- virtual bool IsCritical() {
- UNREACHABLE();
- return true;
- }
-
- int num() { return num_; }
- void set_num(int n) { num_ = n; }
-
- private:
- // Support for ast node numbering.
- int num_;
};
@@ -172,8 +154,6 @@ class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
- explicit Statement(Statement* other);
-
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
@@ -201,48 +181,33 @@ class Expression: public AstNode {
// Evaluated for its value (and side effects).
kValue,
// Evaluated for control flow (and side effects).
- kTest,
- // Evaluated for control flow and side effects. Value is also
- // needed if true.
- kValueTest,
- // Evaluated for control flow and side effects. Value is also
- // needed if false.
- kTestValue
+ kTest
};
Expression() : bitfields_(0) {}
- explicit Expression(Expression* other);
-
virtual Expression* AsExpression() { return this; }
+ virtual bool IsTrivial() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
- virtual Variable* AssignedVariable() { return NULL; }
-
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indexes as property
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
- // True if the expression does not have (evaluated) subexpressions.
- // Function literals are leaves because their subexpressions are not
- // evaluated.
- virtual bool IsLeaf() { return false; }
-
- // True if the expression has no side effects and is safe to
- // evaluate out of order.
- virtual bool IsTrivial() { return false; }
-
- // True if the expression always has one of the non-Object JS types
- // (Undefined, Null, Boolean, String, or Number).
- virtual bool IsPrimitive() = 0;
-
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
virtual void MarkAsStatement() { /* do nothing */ }
+ // True iff the result can be safely overwritten (to avoid allocation).
+ // False for operations that can return one of their operands.
+ virtual bool ResultOverwriteAllowed() { return false; }
+
+ // True iff the expression is a literal represented as a smi.
+ virtual bool IsSmiLiteral() { return false; }
+
// Static type information for this expression.
StaticType* type() { return &type_; }
@@ -259,7 +224,8 @@ class Expression: public AstNode {
// top operation is a bit operation with a mask, or a shift.
bool GuaranteedSmiResult();
- // AST analysis results
+ // AST analysis results.
+ void CopyAnalysisResultsFrom(Expression* other);
// True if the expression rooted at this node can be compiled by the
// side-effect free compiler.
@@ -320,11 +286,6 @@ class ValidLeftHandSideSentinel: public Expression {
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
static ValidLeftHandSideSentinel* instance() { return &instance_; }
- virtual bool IsPrimitive() {
- UNREACHABLE();
- return false;
- }
-
private:
static ValidLeftHandSideSentinel instance_;
};
@@ -353,8 +314,6 @@ class BreakableStatement: public Statement {
protected:
inline BreakableStatement(ZoneStringList* labels, Type type);
- explicit BreakableStatement(BreakableStatement* other);
-
private:
ZoneStringList* labels_;
Type type_;
@@ -366,10 +325,6 @@ class Block: public BreakableStatement {
public:
inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
- // Construct a clone initialized from the original block and
- // a deep copy of all statements of the original block.
- Block(Block* other, ZoneList<Statement*>* statements);
-
virtual void Accept(AstVisitor* v);
virtual Block* AsBlock() { return this; }
@@ -433,10 +388,6 @@ class IterationStatement: public BreakableStatement {
protected:
explicit inline IterationStatement(ZoneStringList* labels);
- // Construct a clone initialized from original and
- // a deep copy of the original body.
- IterationStatement(IterationStatement* other, Statement* body);
-
void Initialize(Statement* body) {
body_ = body;
}
@@ -486,13 +437,14 @@ class WhileStatement: public IterationStatement {
bool may_have_function_literal() const {
return may_have_function_literal_;
}
+ void set_may_have_function_literal(bool value) {
+ may_have_function_literal_ = value;
+ }
private:
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
-
- friend class AstOptimizer;
};
@@ -500,14 +452,6 @@ class ForStatement: public IterationStatement {
public:
explicit inline ForStatement(ZoneStringList* labels);
- // Construct a for-statement initialized from another for-statement
- // and deep copies of all parts of the original statement.
- ForStatement(ForStatement* other,
- Statement* init,
- Expression* cond,
- Statement* next,
- Statement* body);
-
virtual ForStatement* AsForStatement() { return this; }
void Initialize(Statement* init,
@@ -528,17 +472,18 @@ class ForStatement: public IterationStatement {
void set_cond(Expression* expr) { cond_ = expr; }
Statement* next() const { return next_; }
void set_next(Statement* stmt) { next_ = stmt; }
+
bool may_have_function_literal() const {
return may_have_function_literal_;
}
+ void set_may_have_function_literal(bool value) {
+ may_have_function_literal_ = value;
+ }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
- bool peel_this_loop() { return peel_this_loop_; }
- void set_peel_this_loop(bool b) { peel_this_loop_ = b; }
-
private:
Statement* init_;
Expression* cond_;
@@ -546,9 +491,6 @@ class ForStatement: public IterationStatement {
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
- bool peel_this_loop_;
-
- friend class AstOptimizer;
};
@@ -578,10 +520,6 @@ class ExpressionStatement: public Statement {
explicit ExpressionStatement(Expression* expression)
: expression_(expression) { }
- // Construct an expression statement initialized from another
- // expression statement and a deep copy of the original expression.
- ExpressionStatement(ExpressionStatement* other, Expression* expression);
-
virtual void Accept(AstVisitor* v);
// Type testing & conversion.
@@ -721,13 +659,6 @@ class IfStatement: public Statement {
then_statement_(then_statement),
else_statement_(else_statement) { }
- // Construct an if-statement initialized from another if-statement
- // and deep copies of all parts of the original.
- IfStatement(IfStatement* other,
- Expression* condition,
- Statement* then_statement,
- Statement* else_statement);
-
virtual void Accept(AstVisitor* v);
bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
@@ -834,8 +765,6 @@ class EmptyStatement: public Statement {
public:
EmptyStatement() {}
- explicit EmptyStatement(EmptyStatement* other);
-
virtual void Accept(AstVisitor* v);
// Type testing & conversion.
@@ -848,6 +777,8 @@ class Literal: public Expression {
explicit Literal(Handle<Object> handle) : handle_(handle) { }
virtual void Accept(AstVisitor* v);
+ virtual bool IsTrivial() { return true; }
+ virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
// Type testing & conversion.
virtual Literal* AsLiteral() { return this; }
@@ -865,11 +796,6 @@ class Literal: public Expression {
return false;
}
- virtual bool IsLeaf() { return true; }
- virtual bool IsTrivial() { return true; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -916,7 +842,6 @@ class ObjectLiteral: public MaterializedLiteral {
// to the code generator.
class Property: public ZoneObject {
public:
-
enum Kind {
CONSTANT, // Property with constant value (compile time).
COMPUTED, // Property with computed value (execution time).
@@ -954,10 +879,6 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
- virtual bool IsLeaf() { return properties()->is_empty(); }
-
- virtual bool IsPrimitive();
-
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
@@ -984,10 +905,6 @@ class RegExpLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
- virtual bool IsLeaf() { return true; }
-
- virtual bool IsPrimitive();
-
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@@ -1012,10 +929,6 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
- virtual bool IsLeaf() { return values()->is_empty(); }
-
- virtual bool IsPrimitive();
-
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -1036,8 +949,6 @@ class CatchExtensionObject: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Literal* key() const { return key_; }
VariableProxy* value() const { return value_; }
@@ -1055,7 +966,10 @@ class VariableProxy: public Expression {
virtual Property* AsProperty() {
return var_ == NULL ? NULL : var_->AsProperty();
}
- virtual VariableProxy* AsVariableProxy() { return this; }
+
+ virtual VariableProxy* AsVariableProxy() {
+ return this;
+ }
Variable* AsVariable() {
return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
@@ -1065,20 +979,12 @@ class VariableProxy: public Expression {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
- virtual bool IsLeaf() {
- ASSERT(var_ != NULL); // Variable must be resolved.
- return var()->is_global() || var()->rewrite()->IsLeaf();
+ virtual bool IsTrivial() {
+ // Reading from a mutable variable is a side effect, but the
+ // variable for 'this' is immutable.
+ return is_this_ || is_trivial_;
}
- // Reading from a mutable variable is a side effect, but 'this' is
- // immutable.
- virtual bool IsTrivial() { return is_trivial_; }
-
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
- void SetIsPrimitive(bool value) { is_primitive_ = value; }
-
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@@ -1092,11 +998,8 @@ class VariableProxy: public Expression {
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
- bool is_trivial() { return is_trivial_; }
- void set_is_trivial(bool b) { is_trivial_ = b; }
- BitVector* reaching_definitions() { return reaching_definitions_; }
- void set_reaching_definitions(BitVector* rd) { reaching_definitions_ = rd; }
+ void MarkAsTrivial() { is_trivial_ = true; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1107,8 +1010,6 @@ class VariableProxy: public Expression {
bool is_this_;
bool inside_with_;
bool is_trivial_;
- BitVector* reaching_definitions_;
- bool is_primitive_;
VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);
@@ -1125,11 +1026,6 @@ class VariableProxySentinel: public VariableProxy {
return &identifier_proxy_;
}
- virtual bool IsPrimitive() {
- UNREACHABLE();
- return false;
- }
-
private:
explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
static VariableProxySentinel this_proxy_;
@@ -1171,13 +1067,6 @@ class Slot: public Expression {
// Type testing & conversion
virtual Slot* AsSlot() { return this; }
- virtual bool IsLeaf() { return true; }
-
- virtual bool IsPrimitive() {
- UNREACHABLE();
- return false;
- }
-
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
// Accessors
@@ -1203,8 +1092,6 @@ class Property: public Expression {
Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
: obj_(obj), key_(key), pos_(pos), type_(type) { }
- Property(Property* other, Expression* obj, Expression* key);
-
virtual void Accept(AstVisitor* v);
// Type testing & conversion
@@ -1212,9 +1099,6 @@ class Property: public Expression {
virtual bool IsValidLeftHandSide() { return true; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
int position() const { return pos_; }
@@ -1240,16 +1124,11 @@ class Call: public Expression {
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
- Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments);
-
virtual void Accept(AstVisitor* v);
// Type testing and conversion.
virtual Call* AsCall() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@@ -1272,8 +1151,6 @@ class CallNew: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@@ -1298,8 +1175,6 @@ class CallRuntime: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1319,16 +1194,12 @@ class UnaryOperation: public Expression {
ASSERT(Token::IsUnaryOp(op));
}
- UnaryOperation(UnaryOperation* other, Expression* expression);
-
virtual void Accept(AstVisitor* v);
+ virtual bool ResultOverwriteAllowed();
// Type testing & conversion
virtual UnaryOperation* AsUnaryOperation() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@@ -1340,120 +1211,102 @@ class UnaryOperation: public Expression {
class BinaryOperation: public Expression {
public:
- BinaryOperation(Token::Value op, Expression* left, Expression* right)
- : op_(op), left_(left), right_(right) {
+ BinaryOperation(Token::Value op,
+ Expression* left,
+ Expression* right,
+ int pos)
+ : op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsBinaryOp(op));
}
- // Construct a binary operation with a given operator and left and right
- // subexpressions. The rest of the expression state is copied from
- // another expression.
- BinaryOperation(Expression* other,
- Token::Value op,
- Expression* left,
- Expression* right);
+ // Create the binary operation corresponding to a compound assignment.
+ explicit BinaryOperation(Assignment* assignment);
virtual void Accept(AstVisitor* v);
+ virtual bool ResultOverwriteAllowed();
// Type testing & conversion
virtual BinaryOperation* AsBinaryOperation() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
- // True iff the result can be safely overwritten (to avoid allocation).
- // False for operations that can return one of their operands.
- bool ResultOverwriteAllowed() {
- switch (op_) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- return false;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- return true;
- default:
- UNREACHABLE();
- }
- return false;
- }
-
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ int position() const { return pos_; }
private:
Token::Value op_;
Expression* left_;
Expression* right_;
+ int pos_;
};
-class CountOperation: public Expression {
+class IncrementOperation: public Expression {
public:
- CountOperation(bool is_prefix, Token::Value op, Expression* expression)
- : is_prefix_(is_prefix), op_(op), expression_(expression) {
+ IncrementOperation(Token::Value op, Expression* expr)
+ : op_(op), expression_(expr) {
ASSERT(Token::IsCountOp(op));
}
- CountOperation(CountOperation* other, Expression* expression);
+ Token::Value op() const { return op_; }
+ bool is_increment() { return op_ == Token::INC; }
+ Expression* expression() const { return expression_; }
virtual void Accept(AstVisitor* v);
- virtual CountOperation* AsCountOperation() { return this; }
+ private:
+ Token::Value op_;
+ Expression* expression_;
+ int pos_;
+};
- virtual Variable* AssignedVariable() {
- return expression()->AsVariableProxy()->AsVariable();
- }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
+class CountOperation: public Expression {
+ public:
+ CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
+ : is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
+
+ virtual void Accept(AstVisitor* v);
+
+ virtual CountOperation* AsCountOperation() { return this; }
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
- Token::Value op() const { return op_; }
+
+ Token::Value op() const { return increment_->op(); }
Token::Value binary_op() {
- return op_ == Token::INC ? Token::ADD : Token::SUB;
+ return (op() == Token::INC) ? Token::ADD : Token::SUB;
}
- Expression* expression() const { return expression_; }
+
+ Expression* expression() const { return increment_->expression(); }
+ IncrementOperation* increment() const { return increment_; }
+ int position() const { return pos_; }
virtual void MarkAsStatement() { is_prefix_ = true; }
private:
bool is_prefix_;
- Token::Value op_;
- Expression* expression_;
+ IncrementOperation* increment_;
+ int pos_;
};
class CompareOperation: public Expression {
public:
- CompareOperation(Token::Value op, Expression* left, Expression* right)
- : op_(op), left_(left), right_(right) {
+ CompareOperation(Token::Value op,
+ Expression* left,
+ Expression* right,
+ int pos)
+ : op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
- CompareOperation(CompareOperation* other,
- Expression* left,
- Expression* right);
-
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ int position() const { return pos_; }
// Type testing & conversion
virtual CompareOperation* AsCompareOperation() { return this; }
@@ -1462,6 +1315,24 @@ class CompareOperation: public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
+ int pos_;
+};
+
+
+class CompareToNull: public Expression {
+ public:
+ CompareToNull(bool is_strict, Expression* expression)
+ : is_strict_(is_strict), expression_(expression) { }
+
+ virtual void Accept(AstVisitor* v);
+
+ bool is_strict() const { return is_strict_; }
+ Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
+ Expression* expression() const { return expression_; }
+
+ private:
+ bool is_strict_;
+ Expression* expression_;
};
@@ -1480,8 +1351,6 @@ class Conditional: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
@@ -1506,20 +1375,11 @@ class Assignment: public Expression {
ASSERT(Token::IsAssignmentOp(op));
}
- Assignment(Assignment* other, Expression* target, Expression* value);
-
virtual void Accept(AstVisitor* v);
virtual Assignment* AsAssignment() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
- virtual Variable* AssignedVariable() {
- return target()->AsVariableProxy()->AsVariable();
- }
-
Token::Value binary_op() const;
Token::Value op() const { return op_; }
@@ -1555,8 +1415,6 @@ class Throw: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Expression* exception() const { return exception_; }
int position() const { return pos_; }
@@ -1578,7 +1436,8 @@ class FunctionLiteral: public Expression {
int num_parameters,
int start_position,
int end_position,
- bool is_expression)
+ bool is_expression,
+ bool contains_loops)
: name_(name),
scope_(scope),
body_(body),
@@ -1591,6 +1450,7 @@ class FunctionLiteral: public Expression {
start_position_(start_position),
end_position_(end_position),
is_expression_(is_expression),
+ contains_loops_(contains_loops),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false) {
@@ -1604,10 +1464,6 @@ class FunctionLiteral: public Expression {
// Type testing & conversion
virtual FunctionLiteral* AsFunctionLiteral() { return this; }
- virtual bool IsLeaf() { return true; }
-
- virtual bool IsPrimitive();
-
Handle<String> name() const { return name_; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
@@ -1616,6 +1472,7 @@ class FunctionLiteral: public Expression {
int start_position() const { return start_position_; }
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
+ bool contains_loops() const { return contains_loops_; }
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -1656,6 +1513,7 @@ class FunctionLiteral: public Expression {
int start_position_;
int end_position_;
bool is_expression_;
+ bool contains_loops_;
int function_token_position_;
Handle<String> inferred_name_;
bool try_full_codegen_;
@@ -1675,12 +1533,8 @@ class SharedFunctionInfoLiteral: public Expression {
return shared_function_info_;
}
- virtual bool IsLeaf() { return true; }
-
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
private:
Handle<SharedFunctionInfo> shared_function_info_;
};
@@ -1689,8 +1543,6 @@ class SharedFunctionInfoLiteral: public Expression {
class ThisFunction: public Expression {
public:
virtual void Accept(AstVisitor* v);
- virtual bool IsLeaf() { return true; }
- virtual bool IsPrimitive();
};
@@ -1895,7 +1747,7 @@ class RegExpText: public RegExpTree {
void AddElement(TextElement elm) {
elements_.Add(elm);
length_ += elm.length();
- };
+ }
ZoneList<TextElement>* elements() { return &elements_; }
private:
ZoneList<TextElement> elements_;
@@ -2078,29 +1930,6 @@ class AstVisitor BASE_EMBEDDED {
bool stack_overflow_;
};
-
-class CopyAstVisitor : public AstVisitor {
- public:
- Expression* DeepCopyExpr(Expression* expr);
-
- Statement* DeepCopyStmt(Statement* stmt);
-
- private:
- ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions);
-
- ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Holds the result of copying an expression.
- Expression* expr_;
- // Holds the result of copying a statement.
- Statement* stmt_;
-};
-
} } // namespace v8::internal
#endif // V8_AST_H_
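One header change worth calling out: ResultOverwriteAllowed moves from a concrete switch over token values inside BinaryOperation to a virtual predicate declared on Expression, so code generators can ask any expression whether its result object may be clobbered in place. A hedged sketch of that dispatch shape, with invented names rather than the real V8 classes:

#include <cstdio>

class Expression {
 public:
  virtual ~Expression() {}
  // Default: the result may alias one of the operands, so it must not
  // be overwritten in place.
  virtual bool ResultOverwriteAllowed() { return false; }
};

class BinaryOperation : public Expression {
 public:
  explicit BinaryOperation(bool is_arithmetic)
      : is_arithmetic_(is_arithmetic) {}
  // Arithmetic operators always produce a fresh value; COMMA/OR/AND
  // forward an operand and therefore keep the default.
  virtual bool ResultOverwriteAllowed() { return is_arithmetic_; }
 private:
  bool is_arithmetic_;
};

int main() {
  Expression* e = new BinaryOperation(true);
  std::printf("%d\n", e->ResultOverwriteAllowed());  // prints 1
  return 0;
}

The virtual form trades the exhaustive switch (with its UNREACHABLE default) for per-class overrides, which extends more gracefully as node kinds such as CompareToNull are added.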
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index ce8e98d6a5..a82d1d6966 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -232,6 +232,7 @@ class Genesis BASE_EMBEDDED {
bool InstallNatives();
void InstallCustomCallGenerators();
void InstallJSFunctionResultCaches();
+ void InitializeNormalizedMapCaches();
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
static bool InstallExtensions(Handle<Context> global_context,
@@ -719,6 +720,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
Top::initial_object_prototype(), Builtins::Illegal,
true);
+ string_fun->shared()->set_construct_stub(
+ Builtins::builtin(Builtins::StringConstructCode));
global_context()->set_string_function(*string_fun);
// Add 'length' property to strings.
Handle<DescriptorArray> string_descriptors =
@@ -1400,6 +1403,13 @@ void Genesis::InstallJSFunctionResultCaches() {
}
+void Genesis::InitializeNormalizedMapCaches() {
+ Handle<FixedArray> array(
+ Factory::NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+ global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
+}
+
+
int BootstrapperActive::nesting_ = 0;
@@ -1768,6 +1778,7 @@ Genesis::Genesis(Handle<Object> global_object,
HookUpGlobalProxy(inner_global, global_proxy);
InitializeGlobal(inner_global, empty_function);
InstallJSFunctionResultCaches();
+ InitializeNormalizedMapCaches();
if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();
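InitializeNormalizedMapCaches only allocates the FixedArray that backs the cache; the lookup logic itself lives elsewhere in the runtime. For intuition, a fixed-entry cache over a flat array usually has the following shape; the SlotCache type is invented for illustration and is not V8's NormalizedMapCache:

#include <cstring>

template <typename T, int kEntries>
class SlotCache {
 public:
  SlotCache() { std::memset(slots_, 0, sizeof(slots_)); }
  // A colliding insert simply overwrites the slot, so no explicit
  // eviction bookkeeping is needed.
  void Insert(unsigned hash, T* value) { slots_[hash % kEntries] = value; }
  T* Lookup(unsigned hash) { return slots_[hash % kEntries]; }
 private:
  T* slots_[kEntries];
};

int main() {
  SlotCache<int, 64> cache;
  int value = 42;
  cache.Insert(7, &value);
  return cache.Lookup(7) == &value ? 0 : 1;
}

The overwrite-on-collision policy is what makes a plain, pre-cleared FixedArray a sufficient backing store.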
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 3a0393efbc..b4f4a0611a 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -243,7 +243,7 @@ BUILTIN(ArrayCodeGeneric) {
}
-static Object* AllocateJSArray() {
+MUST_USE_RESULT static Object* AllocateJSArray() {
JSFunction* array_function =
Top::context()->global_context()->array_function();
Object* result = Heap::AllocateJSObject(array_function);
@@ -252,7 +252,7 @@ static Object* AllocateJSArray() {
}
-static Object* AllocateEmptyJSArray() {
+MUST_USE_RESULT static Object* AllocateEmptyJSArray() {
Object* result = AllocateJSArray();
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
@@ -269,6 +269,7 @@ static void CopyElements(AssertNoAllocation* no_gc,
int src_index,
int len) {
ASSERT(dst != src); // Use MoveElements instead.
+ ASSERT(dst->map() != Heap::fixed_cow_array_map());
ASSERT(len > 0);
CopyWords(dst->data_start() + dst_index,
src->data_start() + src_index,
@@ -286,6 +287,7 @@ static void MoveElements(AssertNoAllocation* no_gc,
FixedArray* src,
int src_index,
int len) {
+ ASSERT(dst->map() != Heap::fixed_cow_array_map());
memmove(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kPointerSize);
@@ -297,17 +299,17 @@ static void MoveElements(AssertNoAllocation* no_gc,
static void FillWithHoles(FixedArray* dst, int from, int to) {
+ ASSERT(dst->map() != Heap::fixed_cow_array_map());
MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
}
static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
- // For now this trick is only applied to fixed arrays in new space.
+ ASSERT(elms->map() != Heap::fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
- // In old space we do not use this trick to avoid dealing with
- // region dirty marks.
- ASSERT(Heap::new_space()->Contains(elms));
+ ASSERT(!Heap::lo_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
@@ -317,6 +319,17 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
const int len = elms->length();
+ if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
+ !Heap::new_space()->Contains(elms)) {
+ // If we are doing a big trim in old space then we zap the space that was
+ // formerly part of the array so that the GC (aided by the card-based
+ // remembered set) won't find pointers to new-space there.
+ Object** zap = reinterpret_cast<Object**>(elms->address());
+ zap++; // Header of filler must be at least one word so skip that.
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
+ }
+ }
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
@@ -325,9 +338,8 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
- ASSERT_EQ(elms->address() + to_trim * kPointerSize,
- (elms + to_trim * kPointerSize)->address());
- return elms + to_trim * kPointerSize;
+ return FixedArray::cast(HeapObject::FromAddress(
+ elms->address() + to_trim * kPointerSize));
}
@@ -348,33 +360,24 @@ static bool ArrayPrototypeHasNoElements(Context* global_context,
}
-static bool IsJSArrayWithFastElements(Object* receiver,
- FixedArray** elements) {
- if (!receiver->IsJSArray()) {
- return false;
- }
-
+static inline Object* EnsureJSArrayWithWritableFastElements(Object* receiver) {
+ if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
-
HeapObject* elms = HeapObject::cast(array->elements());
- if (elms->map() != Heap::fixed_array_map()) {
- return false;
+ if (elms->map() == Heap::fixed_array_map()) return elms;
+ if (elms->map() == Heap::fixed_cow_array_map()) {
+ return array->EnsureWritableFastElements();
}
-
- *elements = FixedArray::cast(elms);
- return true;
+ return NULL;
}
-static bool IsFastElementMovingAllowed(Object* receiver,
- FixedArray** elements) {
- if (!IsJSArrayWithFastElements(receiver, elements)) return false;
-
+static inline bool IsJSArrayFastElementMovingAllowed(JSArray* receiver) {
Context* global_context = Top::context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
- if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false;
- return ArrayPrototypeHasNoElements(global_context, array_proto);
+ return receiver->GetPrototype() == array_proto &&
+ ArrayPrototypeHasNoElements(global_context, array_proto);
}
@@ -405,10 +408,10 @@ static Object* CallJsBuiltin(const char* name,
BUILTIN(ArrayPush) {
Object* receiver = *args.receiver();
- FixedArray* elms = NULL;
- if (!IsJSArrayWithFastElements(receiver, &elms)) {
- return CallJsBuiltin("ArrayPush", args);
- }
+ Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
+ if (elms_obj == NULL) return CallJsBuiltin("ArrayPush", args);
+ if (elms_obj->IsFailure()) return elms_obj;
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
@@ -454,10 +457,10 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
Object* receiver = *args.receiver();
- FixedArray* elms = NULL;
- if (!IsJSArrayWithFastElements(receiver, &elms)) {
- return CallJsBuiltin("ArrayPop", args);
- }
+ Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
+ if (elms_obj == NULL) return CallJsBuiltin("ArrayPop", args);
+ if (elms_obj->IsFailure()) return elms_obj;
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
@@ -483,10 +486,13 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
- FixedArray* elms = NULL;
- if (!IsFastElementMovingAllowed(receiver, &elms)) {
+ Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
+ if (elms_obj->IsFailure()) return elms_obj;
+ if (elms_obj == NULL ||
+ !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArrayShift", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
@@ -499,8 +505,8 @@ BUILTIN(ArrayShift) {
first = Heap::undefined_value();
}
- if (Heap::new_space()->Contains(elms)) {
- // As elms still in the same space they used to be (new space),
+ if (!Heap::lo_space()->Contains(elms)) {
+    // As elms is still in the same space it used to be in,
// there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
@@ -519,10 +525,13 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
- FixedArray* elms = NULL;
- if (!IsFastElementMovingAllowed(receiver, &elms)) {
+ Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
+ if (elms_obj->IsFailure()) return elms_obj;
+ if (elms_obj == NULL ||
+ !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArrayUnshift", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
@@ -568,10 +577,13 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Object* receiver = *args.receiver();
- FixedArray* elms = NULL;
- if (!IsFastElementMovingAllowed(receiver, &elms)) {
+ Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
+ if (elms_obj->IsFailure()) return elms_obj;
+ if (elms_obj == NULL ||
+ !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArraySlice", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
@@ -637,10 +649,13 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
- FixedArray* elms = NULL;
- if (!IsFastElementMovingAllowed(receiver, &elms)) {
+ Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
+ if (elms_obj->IsFailure()) return elms_obj;
+ if (elms_obj == NULL ||
+ !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArraySplice", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
@@ -648,13 +663,9 @@ BUILTIN(ArraySplice) {
int n_arguments = args.length() - 1;
- // SpiderMonkey and JSC return undefined in the case where no
- // arguments are given instead of using the implicit undefined
- // arguments. This does not follow ECMA-262, but we do the same for
- // compatibility.
- // TraceMonkey follows ECMA-262 though.
+  // Return an empty array when no arguments are supplied.
if (n_arguments == 0) {
- return Heap::undefined_value();
+ return AllocateEmptyJSArray();
}
int relative_start = 0;
@@ -717,7 +728,7 @@ BUILTIN(ArraySplice) {
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = Heap::new_space()->Contains(elms) &&
+ const bool trim_array = !Heap::lo_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
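Two mechanics in the builtins.cc hunks are easy to miss. LeftTrimFixedArray drops the first to_trim elements without copying: it writes a filler header over the vacated words and returns the object address advanced by to_trim words, and the new zapping loop additionally fills the vacated words with smi zeros in old space so the card-based remembered set cannot pick up stale new-space pointers. A toy model of the pointer arithmetic, using a plain word array instead of real heap objects (kFillerMap/kArrayMap are invented constants, and the filler's size bookkeeping is omitted):

#include <cassert>

typedef long Word;
static const Word kFillerMap = -1;
static const Word kArrayMap = -2;

// A "heap object" here is a word array: [map, length, elements...].
Word* LeftTrim(Word* array, int to_trim) {
  Word length = array[1];
  assert(to_trim > 0 && to_trim < length);
  // Leave a valid filler object over the vacated prefix so a heap
  // walker still sees well-formed objects, then slide the header right.
  array[0] = kFillerMap;
  array[to_trim] = kArrayMap;             // new map slot
  array[to_trim + 1] = length - to_trim;  // new length slot
  return array + to_trim;
}

int main() {
  Word heap[6] = { kArrayMap, 4, 10, 20, 30, 40 };
  Word* trimmed = LeftTrim(heap, 1);
  assert(trimmed[0] == kArrayMap);
  assert(trimmed[1] == 3);   // one element shorter
  assert(trimmed[2] == 20);  // surviving elements untouched
  return 0;
}

The header slides right in place and the surviving elements never move, which is also why the trick needs the object start to be movable, i.e. it cannot apply in large object space.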
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 375e8f3f89..7e49f3133a 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -117,7 +117,10 @@ enum BuiltinExtraArguments {
V(FunctionApply, BUILTIN, UNINITIALIZED) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED)
+ V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
+ \
+ V(StringConstructCode, BUILTIN, UNINITIALIZED)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -258,6 +261,8 @@ class Builtins : public AllStatic {
static void Generate_ArrayCode(MacroAssembler* masm);
static void Generate_ArrayConstructCode(MacroAssembler* masm);
+
+ static void Generate_StringConstructCode(MacroAssembler* masm);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index fadbc9afbe..0dfc80d0b8 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -34,6 +34,14 @@ namespace v8 {
namespace internal {
+// If c is in 'A'-'Z' or 'a'-'z', return its lower-case counterpart.
+// Else, return something outside of 'A'-'Z' and 'a'-'z'.
+// Note: it ignores the locale.
+inline int AsciiAlphaToLower(uc32 c) {
+ return c | 0x20;
+}
+
+
inline bool IsCarriageReturn(uc32 c) {
return c == 0x000D;
}
@@ -59,12 +67,12 @@ inline bool IsDecimalDigit(uc32 c) {
inline bool IsHexDigit(uc32 c) {
// ECMA-262, 3rd, 7.6 (p 15)
- return IsDecimalDigit(c) || IsInRange(c | 0x20, 'a', 'f');
+ return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
}
inline bool IsRegExpWord(uc16 c) {
- return IsInRange(c | 0x20, 'a', 'z')
+ return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)
|| (c == '_');
}
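The new AsciiAlphaToLower helper centralizes a classic ASCII trick: upper- and lower-case letters differ only in bit 0x20, so OR-ing that bit in folds 'A'-'Z' onto 'a'-'z' and leaves every non-letter somewhere outside 'a'-'z', which is all the IsHexDigit and IsRegExpWord range checks need. A self-contained check of the property:

#include <cassert>

inline int AsciiAlphaToLower(int c) { return c | 0x20; }

int main() {
  assert(AsciiAlphaToLower('F') == 'f');  // upper-case folds down
  assert(AsciiAlphaToLower('f') == 'f');  // lower-case is unchanged
  int d = AsciiAlphaToLower('5');         // non-letters land outside
  assert(d < 'a' || d > 'z');             // the 'a'-'z' range
  return 0;
}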
diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc
index af650de5e7..928c3f0c05 100644
--- a/deps/v8/src/circular-queue.cc
+++ b/deps/v8/src/circular-queue.cc
@@ -47,8 +47,9 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
ASSERT(buffer_size_in_chunks > 2);
- // Only need to keep the first cell of a chunk clean.
- for (int i = 0; i < buffer_size_; i += chunk_size_) {
+ // Clean up the whole buffer to avoid encountering a random kEnd
+ // while enqueuing.
+ for (int i = 0; i < buffer_size_; ++i) {
buffer_[i] = kClear;
}
buffer_[buffer_size_] = kEnd;
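The circular-queue change restores an invariant rather than a structure: the producer treats kClear as a free cell and the single kEnd sentinel one past the buffer as the wrap marker, so an uninitialized interior cell that happened to contain the kEnd bit pattern would make the producer wrap early. A toy restatement of the invariant the full clear establishes (not the actual SamplingCircularQueue):

#include <cassert>

enum { kClear = 0, kEnd = -1 };

int main() {
  const int kSize = 8;
  int buffer[kSize + 1];
  for (int i = 0; i < kSize; ++i) buffer[i] = kClear;  // every interior cell
  buffer[kSize] = kEnd;                                // one wrap sentinel
  // The producer's scan must never see kEnd inside the buffer proper.
  for (int i = 0; i < kSize; ++i) assert(buffer[i] != kEnd);
  return 0;
}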
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index e5a222fcda..98a5cf67b9 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -29,6 +29,7 @@
#define V8_CODE_STUBS_H_
#include "globals.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -80,6 +81,14 @@ namespace internal {
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
+// Types of uncatchable exceptions.
+enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
+
+
 // Stub is the base class of all stubs.
class CodeStub BASE_EMBEDDED {
public:
@@ -101,10 +110,16 @@ class CodeStub BASE_EMBEDDED {
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
- };
+ }
static int MinorKeyFromKey(uint32_t key) {
return MinorKeyBits::decode(key);
- };
+ }
+
+ // Gets the major key from a code object that is a code stub or binary op IC.
+ static Major GetMajorKey(Code* code_stub) {
+ return static_cast<Major>(code_stub->major_key());
+ }
+
static const char* MajorName(Major major_key, bool allow_unknown_keys);
virtual ~CodeStub() {}
@@ -172,6 +187,609 @@ class CodeStub BASE_EMBEDDED {
friend class BreakPointIterator;
};
+
+// Helper interface to prepare to/restore after making runtime calls.
+class RuntimeCallHelper {
+ public:
+ virtual ~RuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const = 0;
+
+ virtual void AfterCall(MacroAssembler* masm) const = 0;
+
+ protected:
+ RuntimeCallHelper() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
+};
+
+} } // namespace v8::internal
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/code-stubs-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/code-stubs-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/code-stubs-mips.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
+// newly created internal frame before/after the runtime call.
+class ICRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ ICRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const;
+
+ virtual void AfterCall(MacroAssembler* masm) const;
+};
+
+
+// Trivial RuntimeCallHelper implementation.
+class NopRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ NopRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const {}
+
+ virtual void AfterCall(MacroAssembler* masm) const {}
+};
+
+
+class StackCheckStub : public CodeStub {
+ public:
+ StackCheckStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+
+ const char* GetName() { return "StackCheckStub"; }
+
+ Major MajorKey() { return StackCheck; }
+ int MinorKey() { return 0; }
+};
+
+
+class FastNewClosureStub : public CodeStub {
+ public:
+ void Generate(MacroAssembler* masm);
+
+ private:
+ const char* GetName() { return "FastNewClosureStub"; }
+ Major MajorKey() { return FastNewClosure; }
+ int MinorKey() { return 0; }
+};
+
+
+class FastNewContextStub : public CodeStub {
+ public:
+ static const int kMaximumSlots = 64;
+
+ explicit FastNewContextStub(int slots) : slots_(slots) {
+ ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int slots_;
+
+ const char* GetName() { return "FastNewContextStub"; }
+ Major MajorKey() { return FastNewContext; }
+ int MinorKey() { return slots_; }
+};
+
+
+class FastCloneShallowArrayStub : public CodeStub {
+ public:
+ // Maximum length of copied elements array.
+ static const int kMaximumClonedLength = 8;
+
+ enum Mode {
+ CLONE_ELEMENTS,
+ COPY_ON_WRITE_ELEMENTS
+ };
+
+ FastCloneShallowArrayStub(Mode mode, int length)
+ : mode_(mode),
+ length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
+ ASSERT(length_ >= 0);
+ ASSERT(length_ <= kMaximumClonedLength);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Mode mode_;
+ int length_;
+
+ const char* GetName() { return "FastCloneShallowArrayStub"; }
+ Major MajorKey() { return FastCloneShallowArray; }
+ int MinorKey() {
+ ASSERT(mode_ == 0 || mode_ == 1);
+ return (length_ << 1) | mode_;
+ }
+};
+
+
+class InstanceofStub: public CodeStub {
+ public:
+ InstanceofStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return Instanceof; }
+ int MinorKey() { return 0; }
+};
+
+
+enum NegativeZeroHandling {
+ kStrictNegativeZero,
+ kIgnoreNegativeZero
+};
+
+
+class GenericUnaryOpStub : public CodeStub {
+ public:
+ GenericUnaryOpStub(Token::Value op,
+ UnaryOverwriteMode overwrite,
+ NegativeZeroHandling negative_zero = kStrictNegativeZero)
+ : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode overwrite_;
+ NegativeZeroHandling negative_zero_;
+
+ class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
+ class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
+
+ Major MajorKey() { return GenericUnaryOp; }
+ int MinorKey() {
+ return OpField::encode(op_) |
+ OverwriteField::encode(overwrite_) |
+ NegativeZeroField::encode(negative_zero_);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName();
+};
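GenericUnaryOpStub above shows the minor-key recipe used throughout this header: each stub parameter gets a BitField at a fixed shift and width, and the minor key is the OR of the encoded fields. A simplified sketch of the encode/decode arithmetic (V8's real BitField helper also carries validity checks omitted here):

#include <cassert>

template <class T, int shift, int size>
class BitField {
 public:
  static int encode(T value) {
    return static_cast<int>(value) << shift;
  }
  static T decode(int field) {
    return static_cast<T>((field >> shift) & ((1 << size) - 1));
  }
};

class OverwriteField : public BitField<int, 0, 1> {};
class NegativeZeroField : public BitField<int, 1, 1> {};
class OpField : public BitField<int, 2, 8> {};

int main() {
  int key = OpField::encode(42) |
            OverwriteField::encode(1) |
            NegativeZeroField::encode(0);
  assert(OpField::decode(key) == 42);
  assert(OverwriteField::decode(key) == 1);
  assert(NegativeZeroField::decode(key) == 0);
  return 0;
}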
+
+
+enum NaNInformation {
+ kBothCouldBeNaN,
+ kCantBothBeNaN
+};
+
+
+class CompareStub: public CodeStub {
+ public:
+ CompareStub(Condition cc,
+ bool strict,
+ NaNInformation nan_info = kBothCouldBeNaN,
+ bool include_number_compare = true,
+ Register lhs = no_reg,
+ Register rhs = no_reg) :
+ cc_(cc),
+ strict_(strict),
+ never_nan_nan_(nan_info == kCantBothBeNaN),
+ include_number_compare_(include_number_compare),
+ lhs_(lhs),
+ rhs_(rhs),
+ name_(NULL) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Condition cc_;
+ bool strict_;
+ // Only used for 'equal' comparisons. Tells the stub that we already know
+ // that at least one side of the comparison is not NaN. This allows the
+ // stub to use object identity in the positive case. We ignore it when
+ // generating the minor key for other comparisons to avoid creating more
+ // stubs.
+ bool never_nan_nan_;
+  // Whether to generate the number comparison code in the stub. Stubs
+  // without number comparison code are used when the number comparison has
+  // been inlined, and the stub will be called if one of the operands is not
+  // a number.
+ bool include_number_compare_;
+ // Register holding the left hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register lhs_;
+ // Register holding the right hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register rhs_;
+
+ // Encoding of the minor key CCCCCCCCCCCCRCNS.
+ class StrictField: public BitField<bool, 0, 1> {};
+ class NeverNanNanField: public BitField<bool, 1, 1> {};
+ class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+ class RegisterField: public BitField<bool, 3, 1> {};
+ class ConditionField: public BitField<int, 4, 12> {};
+
+ Major MajorKey() { return Compare; }
+
+ int MinorKey();
+
+ // Branch to the label if the given object isn't a symbol.
+ void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch);
+
+ // Unfortunately you have to run without snapshots to see most of these
+ // names in the profile since most compare stubs end up in the snapshot.
+ char* name_;
+ const char* GetName();
+#ifdef DEBUG
+ void Print() {
+ PrintF("CompareStub (cc %d), (strict %s), "
+ "(never_nan_nan %s), (number_compare %s) ",
+ static_cast<int>(cc_),
+ strict_ ? "true" : "false",
+ never_nan_nan_ ? "true" : "false",
+ include_number_compare_ ? "included" : "not included");
+
+ if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
+ PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
+ } else {
+ PrintF("\n");
+ }
+ }
+#endif
+};
+
+
+class CEntryStub : public CodeStub {
+ public:
+ explicit CEntryStub(int result_size) : result_size_(result_size) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ void GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int alignment_skew = 0);
+ void GenerateThrowTOS(MacroAssembler* masm);
+ void GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type);
+
+ // Number of pointers/values returned.
+ const int result_size_;
+
+ Major MajorKey() { return CEntry; }
+ // The minor key must differ whenever different result_size_ values mean
+ // that different code is generated.
+ int MinorKey();
+
+ const char* GetName() { return "CEntryStub"; }
+};
+
+
+class ApiGetterEntryStub : public CodeStub {
+ public:
+ ApiGetterEntryStub(Handle<AccessorInfo> info,
+ ApiFunction* fun)
+ : info_(info),
+ fun_(fun) { }
+ void Generate(MacroAssembler* masm);
+ virtual bool has_custom_cache() { return true; }
+ virtual bool GetCustomCache(Code** code_out);
+ virtual void SetCustomCache(Code* value);
+
+ static const int kStackSpace = 5;
+ static const int kArgc = 4;
+ private:
+ Handle<AccessorInfo> info() { return info_; }
+ ApiFunction* fun() { return fun_; }
+ Major MajorKey() { return NoCache; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "ApiEntryStub"; }
+ // The accessor info associated with the function.
+ Handle<AccessorInfo> info_;
+ // The function to be called.
+ ApiFunction* fun_;
+};
+
+
+class JSEntryStub : public CodeStub {
+ public:
+ JSEntryStub() { }
+
+ void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+
+ protected:
+ void GenerateBody(MacroAssembler* masm, bool is_construct);
+
+ private:
+ Major MajorKey() { return JSEntry; }
+ int MinorKey() { return 0; }
+
+ const char* GetName() { return "JSEntryStub"; }
+};
+
+
+class JSConstructEntryStub : public JSEntryStub {
+ public:
+ JSConstructEntryStub() { }
+
+ void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+
+ private:
+ int MinorKey() { return 1; }
+
+ const char* GetName() { return "JSConstructEntryStub"; }
+};
+
+
+class ArgumentsAccessStub: public CodeStub {
+ public:
+ enum Type {
+ READ_ELEMENT,
+ NEW_OBJECT
+ };
+
+ explicit ArgumentsAccessStub(Type type) : type_(type) { }
+
+ private:
+ Type type_;
+
+ Major MajorKey() { return ArgumentsAccess; }
+ int MinorKey() { return type_; }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateReadElement(MacroAssembler* masm);
+ void GenerateNewObject(MacroAssembler* masm);
+
+ const char* GetName() { return "ArgumentsAccessStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("ArgumentsAccessStub (type %d)\n", type_);
+ }
+#endif
+};
+
+
+class RegExpExecStub: public CodeStub {
+ public:
+ RegExpExecStub() { }
+
+ private:
+ Major MajorKey() { return RegExpExec; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "RegExpExecStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RegExpExecStub\n");
+ }
+#endif
+};
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
+ : argc_(argc), in_loop_(in_loop), flags_(flags) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+ CallFunctionFlags flags_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
+ argc_,
+ static_cast<int>(in_loop_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
+ class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
+ class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
+ class ArgcBits: public BitField<int, 2, 32 - 2> {};
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() {
+ // Encode the parameters in a unique 32 bit value.
+ return InLoopBits::encode(in_loop_)
+ | FlagBits::encode(flags_)
+ | ArgcBits::encode(argc_);
+ }
+
+ InLoopFlag InLoop() { return in_loop_; }
+ bool ReceiverMightBeValue() {
+ return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+ }
+
+ public:
+ static int ExtractArgcFromMinorKey(int minor_key) {
+ return ArgcBits::decode(minor_key);
+ }
+};
+
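ExtractArgcFromMinorKey relies on ArgcBits owning every bit from 2 upward, so the argument count survives whatever the in-loop and flag bits hold. A condensed sketch, assuming a non-negative key with the layout declared above:

    #include <cassert>

    // ArgcBits spans bits 2..31, so for a non-negative key a plain shift
    // recovers the argument count regardless of the two low bits.
    static int ExtractArgcFromMinorKey(int minor_key) { return minor_key >> 2; }

    int main() {
      int minor_key = (5 << 2) | (1 << 1) | 1;  // argc = 5, in_loop and flag bits set
      assert(ExtractArgcFromMinorKey(minor_key) == 5);
      return 0;
    }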
+
+enum StringIndexFlags {
+ // Accepts smis or heap numbers.
+ STRING_INDEX_IS_NUMBER,
+
+ // Accepts smis or heap numbers that are valid array indices
+ // (ECMA-262 15.4). Invalid indices are reported as being out of
+ // range.
+ STRING_INDEX_IS_ARRAY_INDEX
+};
+
+
+// Generates code implementing String.prototype.charCodeAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch| and |result| are clobbered.
+class StringCharCodeAtGenerator {
+ public:
+ StringCharCodeAtGenerator(Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : object_(object),
+ index_(index),
+ scratch_(scratch),
+ result_(result),
+ receiver_not_string_(receiver_not_string),
+ index_not_number_(index_not_number),
+ index_out_of_range_(index_out_of_range),
+ index_flags_(index_flags) {
+ ASSERT(!scratch_.is(object_));
+ ASSERT(!scratch_.is(index_));
+ ASSERT(!scratch_.is(result_));
+ ASSERT(!result_.is(object_));
+ ASSERT(!result_.is(index_));
+ }
+
+ // Generates the fast case code. On the fallthrough path the |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register object_;
+ Register index_;
+ Register scratch_;
+ Register result_;
+
+ Label* receiver_not_string_;
+ Label* index_not_number_;
+ Label* index_out_of_range_;
+
+ StringIndexFlags index_flags_;
+
+ Label call_runtime_;
+ Label index_not_smi_;
+ Label got_smi_index_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
+};
+
+
+// Generates code for creating a one-char string from a char code.
+class StringCharFromCodeGenerator {
+ public:
+ StringCharFromCodeGenerator(Register code,
+ Register result)
+ : code_(code),
+ result_(result) {
+ ASSERT(!code_.is(result_));
+ }
+
+ // Generates the fast case code. On the fallthrough path the |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register code_;
+ Register result_;
+
+ Label slow_case_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
+};
+
+
+// Generates code implementing String.prototype.charAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
+class StringCharAtGenerator {
+ public:
+ StringCharAtGenerator(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : char_code_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ receiver_not_string,
+ index_not_number,
+ index_out_of_range,
+ index_flags),
+ char_from_code_generator_(scratch2, result) {}
+
+ // Generates the fast case code. On the fallthrough path the |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ StringCharCodeAtGenerator char_code_at_generator_;
+ StringCharFromCodeGenerator char_from_code_generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
+};
+
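StringCharAtGenerator is pure composition: its fast path runs the charCodeAt fast path and feeds the result into the char-from-code path, with scratch2 doubling as the hand-off register. A register-free sketch of the same layering, using plain functions as stand-ins for the masm-driven generators:

    #include <cassert>
    #include <string>

    // Stand-ins for the two component generators composed by the class above.
    static int CharCodeAt(const std::string& s, int index) { return s[index]; }
    static std::string CharFromCode(int code) {
      return std::string(1, static_cast<char>(code));
    }

    // charAt = charFromCode(charCodeAt(...)): the composition the class encodes.
    static std::string CharAt(const std::string& s, int index) {
      return CharFromCode(CharCodeAt(s, index));
    }

    int main() {
      assert(CharAt("stub", 1) == "t");
      return 0;
    }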
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index a9fab43f39..148cefca40 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -339,6 +339,11 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
}
+void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
// List of special runtime calls which are generated inline. For some of these
// functions the code will be generated inline, and for others a call to a code
// stub will be inlined.
@@ -380,21 +385,6 @@ bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
}
-bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
- const CodeGenerator::InlineRuntimeLUT& new_entry,
- CodeGenerator::InlineRuntimeLUT* old_entry) {
- InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
- if (entry == NULL) return false;
- if (old_entry != NULL) {
- old_entry->name = entry->name;
- old_entry->method = entry->method;
- }
- entry->name = new_entry.name;
- entry->method = new_entry.method;
- return true;
-}
-
-
int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
CodeGenerator::InlineRuntimeLUT* f =
CodeGenerator::FindInlineRuntimeLUT(name);
@@ -496,12 +486,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
+ ASSERT(result_size_ == 1 || result_size_ == 2);
#ifdef _WIN64
- return ExitFrameModeBits::encode(mode_)
- | IndirectResultBits::encode(result_size_ > 1);
+ return result_size_ == 1 ? 0 : 1;
#else
- return ExitFrameModeBits::encode(mode_);
+ return 0;
#endif
}
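The rewritten CEntryStub::MinorKey depends only on result_size_, and only on Win64, where two-word results are returned indirectly. A standalone sketch of the patched logic:

    #include <cassert>

    // Mirrors the patched CEntryStub::MinorKey(): only Win64 distinguishes
    // result sizes, because two-word results are returned indirectly there.
    static int MinorKey(int result_size) {
      assert(result_size == 1 || result_size == 2);
    #ifdef _WIN64
      return result_size == 1 ? 0 : 1;
    #else
      return 0;
    #endif
    }

    int main() {
      assert(MinorKey(1) == 0);
      return 0;
    }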
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 3b31c04f92..aa2d4422c3 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -64,7 +64,6 @@
// DeclareGlobals
// FindInlineRuntimeLUT
// CheckForInlineRuntimeCall
-// PatchInlineRuntimeEntry
// AnalyzeCondition
// CodeForFunctionPosition
// CodeForReturnPosition
@@ -73,13 +72,6 @@
// CodeForSourcePosition
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
-
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
F(IsSmi, 1, 1) \
F(IsNonNegativeSmi, 1, 1) \
@@ -108,6 +100,7 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(StringCompare, 2, 1) \
F(RegExpExec, 4, 1) \
F(RegExpConstructResult, 3, 1) \
+ F(RegExpCloneResult, 1, 1) \
F(GetFromCache, 2, 1) \
F(NumberToString, 1, 1) \
F(SwapElements, 3, 1) \
@@ -115,7 +108,9 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
F(MathSqrt, 1, 1) \
- F(IsRegExpEquivalent, 2, 1)
+ F(IsRegExpEquivalent, 2, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1)
#if V8_TARGET_ARCH_IA32
@@ -135,29 +130,6 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
namespace v8 {
namespace internal {
-// Support for "structured" code comments.
-#ifdef DEBUG
-
-class Comment BASE_EMBEDDED {
- public:
- Comment(MacroAssembler* masm, const char* msg);
- ~Comment();
-
- private:
- MacroAssembler* masm_;
- const char* msg_;
-};
-
-#else
-
-class Comment BASE_EMBEDDED {
- public:
- Comment(MacroAssembler*, const char*) {}
-};
-
-#endif // DEBUG
-
-
// Code generation can be nested. Code generation scopes form a stack
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
@@ -181,6 +153,7 @@ class CodeGeneratorScope BASE_EMBEDDED {
CodeGenerator* previous_;
};
+
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
@@ -229,23 +202,6 @@ class FrameRegisterState {
#endif
-// Helper interface to prepare to/restore after making runtime calls.
-class RuntimeCallHelper {
- public:
- virtual ~RuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const = 0;
-
- virtual void AfterCall(MacroAssembler* masm) const = 0;
-
- protected:
- RuntimeCallHelper() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
-};
-
-
// RuntimeCallHelper implementation that saves/restores state of a
// virtual frame.
class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
@@ -263,29 +219,6 @@ class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
};
-// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
-// newly created internal frame before/after the runtime call.
-class ICRuntimeCallHelper : public RuntimeCallHelper {
- public:
- ICRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const;
-
- virtual void AfterCall(MacroAssembler* masm) const;
-};
-
-
-// Trivial RuntimeCallHelper implementation.
-class NopRuntimeCallHelper : public RuntimeCallHelper {
- public:
- NopRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const {}
-
- virtual void AfterCall(MacroAssembler* masm) const {}
-};
-
-
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@@ -348,547 +281,7 @@ class DeferredCode: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(DeferredCode);
};
-class StackCheckStub : public CodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
-
- const char* GetName() { return "StackCheckStub"; }
-
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class FastNewClosureStub : public CodeStub {
- public:
- void Generate(MacroAssembler* masm);
-
- private:
- const char* GetName() { return "FastNewClosureStub"; }
- Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return 0; }
-};
-
-
-class FastNewContextStub : public CodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- const char* GetName() { return "FastNewContextStub"; }
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastCloneShallowArrayStub : public CodeStub {
- public:
- static const int kMaximumLength = 8;
-
- explicit FastCloneShallowArrayStub(int length) : length_(length) {
- ASSERT(length >= 0 && length <= kMaximumLength);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int length_;
-
- const char* GetName() { return "FastCloneShallowArrayStub"; }
- Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() { return length_; }
-};
-
-
-class InstanceofStub: public CodeStub {
- public:
- InstanceofStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Instanceof; }
- int MinorKey() { return 0; }
-};
-
-
-enum NegativeZeroHandling {
- kStrictNegativeZero,
- kIgnoreNegativeZero
-};
-
-
-class GenericUnaryOpStub : public CodeStub {
- public:
- GenericUnaryOpStub(Token::Value op,
- UnaryOverwriteMode overwrite,
- NegativeZeroHandling negative_zero = kStrictNegativeZero)
- : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode overwrite_;
- NegativeZeroHandling negative_zero_;
-
- class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
- class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
- class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
-
- Major MajorKey() { return GenericUnaryOp; }
- int MinorKey() {
- return OpField::encode(op_) |
- OverwriteField::encode(overwrite_) |
- NegativeZeroField::encode(negative_zero_);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName();
-};
-
-
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
-};
-
-
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc,
- bool strict,
- NaNInformation nan_info = kBothCouldBeNaN,
- bool include_number_compare = true,
- Register lhs = no_reg,
- Register rhs = no_reg) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_(nan_info == kCantBothBeNaN),
- include_number_compare_(include_number_compare),
- lhs_(lhs),
- rhs_(rhs),
- name_(NULL) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key CCCCCCCCCCCCRCNS.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class RegisterField: public BitField<bool, 3, 1> {};
- class ConditionField: public BitField<int, 4, 12> {};
-
- Major MajorKey() { return Compare; }
-
- int MinorKey();
-
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
-
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- char* name_;
- const char* GetName();
-#ifdef DEBUG
- void Print() {
- PrintF("CompareStub (cc %d), (strict %s), "
- "(never_nan_nan %s), (number_compare %s) ",
- static_cast<int>(cc_),
- strict_ ? "true" : "false",
- never_nan_nan_ ? "true" : "false",
- include_number_compare_ ? "included" : "not included");
-
- if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
- PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
- } else {
- PrintF("\n");
- }
- }
-#endif
-};
-
-
-class CEntryStub : public CodeStub {
- public:
- explicit CEntryStub(int result_size,
- ExitFrame::Mode mode = ExitFrame::MODE_NORMAL)
- : result_size_(result_size), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- void GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int alignment_skew = 0);
- void GenerateThrowTOS(MacroAssembler* masm);
- void GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type);
-
- // Number of pointers/values returned.
- const int result_size_;
- const ExitFrame::Mode mode_;
-
- // Minor key encoding
- class ExitFrameModeBits: public BitField<ExitFrame::Mode, 0, 1> {};
- class IndirectResultBits: public BitField<bool, 1, 1> {};
-
- Major MajorKey() { return CEntry; }
- // Minor key must differ if different result_size_ values means different
- // code is generated.
- int MinorKey();
-
- const char* GetName() { return "CEntryStub"; }
-};
-
-
-class ApiGetterEntryStub : public CodeStub {
- public:
- ApiGetterEntryStub(Handle<AccessorInfo> info,
- ApiFunction* fun)
- : info_(info),
- fun_(fun) { }
- void Generate(MacroAssembler* masm);
- virtual bool has_custom_cache() { return true; }
- virtual bool GetCustomCache(Code** code_out);
- virtual void SetCustomCache(Code* value);
-
- static const int kStackSpace = 5;
- static const int kArgc = 4;
- private:
- Handle<AccessorInfo> info() { return info_; }
- ApiFunction* fun() { return fun_; }
- Major MajorKey() { return NoCache; }
- int MinorKey() { return 0; }
- const char* GetName() { return "ApiEntryStub"; }
- // The accessor info associated with the function.
- Handle<AccessorInfo> info_;
- // The function to be called.
- ApiFunction* fun_;
-};
-
-
-class JSEntryStub : public CodeStub {
- public:
- JSEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
-
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_construct);
-
- private:
- Major MajorKey() { return JSEntry; }
- int MinorKey() { return 0; }
-
- const char* GetName() { return "JSEntryStub"; }
-};
-
-
-class JSConstructEntryStub : public JSEntryStub {
- public:
- JSConstructEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
-
- private:
- int MinorKey() { return 1; }
-
- const char* GetName() { return "JSConstructEntryStub"; }
-};
-
-
-class ArgumentsAccessStub: public CodeStub {
- public:
- enum Type {
- READ_ELEMENT,
- NEW_OBJECT
- };
-
- explicit ArgumentsAccessStub(Type type) : type_(type) { }
-
- private:
- Type type_;
-
- Major MajorKey() { return ArgumentsAccess; }
- int MinorKey() { return type_; }
-
- void Generate(MacroAssembler* masm);
- void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewObject(MacroAssembler* masm);
-
- const char* GetName() { return "ArgumentsAccessStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("ArgumentsAccessStub (type %d)\n", type_);
- }
-#endif
-};
-
-
-class RegExpExecStub: public CodeStub {
- public:
- RegExpExecStub() { }
-
- private:
- Major MajorKey() { return RegExpExec; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "RegExpExecStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("RegExpExecStub\n");
- }
-#endif
-};
-
-
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
- : argc_(argc), in_loop_(in_loop), flags_(flags) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
- CallFunctionFlags flags_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
- argc_,
- static_cast<int>(in_loop_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
- class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
- class ArgcBits: public BitField<int, 2, 32 - 2> {};
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() {
- // Encode the parameters in a unique 32 bit value.
- return InLoopBits::encode(in_loop_)
- | FlagBits::encode(flags_)
- | ArgcBits::encode(argc_);
- }
-
- InLoopFlag InLoop() { return in_loop_; }
- bool ReceiverMightBeValue() {
- return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
- }
-
- public:
- static int ExtractArgcFromMinorKey(int minor_key) {
- return ArgcBits::decode(minor_key);
- }
-};
-
-
-enum StringIndexFlags {
- // Accepts smis or heap numbers.
- STRING_INDEX_IS_NUMBER,
-
- // Accepts smis or heap numbers that are valid array indices
- // (ECMA-262 15.4). Invalid indices are reported as being out of
- // range.
- STRING_INDEX_IS_ARRAY_INDEX
-};
-
-
-// Generates code implementing String.prototype.charCodeAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch| and |result| are clobbered.
-class StringCharCodeAtGenerator {
- public:
- StringCharCodeAtGenerator(Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : object_(object),
- index_(index),
- scratch_(scratch),
- result_(result),
- receiver_not_string_(receiver_not_string),
- index_not_number_(index_not_number),
- index_out_of_range_(index_out_of_range),
- index_flags_(index_flags) {
- ASSERT(!scratch_.is(object_));
- ASSERT(!scratch_.is(index_));
- ASSERT(!scratch_.is(result_));
- ASSERT(!result_.is(object_));
- ASSERT(!result_.is(index_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- Register object_;
- Register index_;
- Register scratch_;
- Register result_;
-
- Label* receiver_not_string_;
- Label* index_not_number_;
- Label* index_out_of_range_;
-
- StringIndexFlags index_flags_;
-
- Label call_runtime_;
- Label index_not_smi_;
- Label got_smi_index_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
-};
-
-
-// Generates code for creating a one-char string from a char code.
-class StringCharFromCodeGenerator {
- public:
- StringCharFromCodeGenerator(Register code,
- Register result)
- : code_(code),
- result_(result) {
- ASSERT(!code_.is(result_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- Register code_;
- Register result_;
-
- Label slow_case_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
-};
-
-
-// Generates code implementing String.prototype.charAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
-class StringCharAtGenerator {
- public:
- StringCharAtGenerator(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : char_code_at_generator_(object,
- index,
- scratch1,
- scratch2,
- receiver_not_string,
- index_not_number,
- index_out_of_range,
- index_flags),
- char_from_code_generator_(scratch2, result) {}
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- StringCharCodeAtGenerator char_code_at_generator_;
- StringCharFromCodeGenerator char_from_code_generator_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
-};
-
-} // namespace internal
-} // namespace v8
+} } // namespace v8::internal
#endif // V8_CODEGEN_H_
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 14252a5892..7402e6857d 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -79,10 +79,9 @@ class CompilationSubCache {
// young generation.
void Age();
- bool HasFunction(SharedFunctionInfo* function_info);
-
// GC support.
void Iterate(ObjectVisitor* v);
+ void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
@@ -206,27 +205,6 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
}
-bool CompilationSubCache::HasFunction(SharedFunctionInfo* function_info) {
- if (function_info->script()->IsUndefined() ||
- Script::cast(function_info->script())->source()->IsUndefined()) {
- return false;
- }
-
- String* source =
- String::cast(Script::cast(function_info->script())->source());
- // Check all generations.
- for (int generation = 0; generation < generations(); generation++) {
- if (tables_[generation]->IsUndefined()) continue;
-
- CompilationCacheTable* table =
- CompilationCacheTable::cast(tables_[generation]);
- Object* object = table->Lookup(source);
- if (object->IsSharedFunctionInfo()) return true;
- }
- return false;
-}
-
-
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
@@ -238,6 +216,16 @@ void CompilationSubCache::Age() {
}
+void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
+ Object* undefined = Heap::raw_unchecked_undefined_value();
+ for (int i = 0; i < generations_; i++) {
+ if (tables_[i] != undefined) {
+ reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
+ }
+ }
+}
+
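IterateFunctions visits only the generations whose table slot has been populated, comparing each slot against the raw undefined sentinel before casting. A condensed sketch of that skip-undefined loop, with simplified stand-in types for Object, the sentinel, and the table:

    #include <cassert>
    #include <cstddef>

    struct ObjectVisitor;  // opaque, as in the real interface
    struct Object { virtual ~Object() {} };
    struct Table : Object {
      int visited;
      Table() : visited(0) {}
      void IterateElements(ObjectVisitor*) { ++visited; }
    };

    // Visit every initialized generation, skipping the undefined sentinel.
    static void IterateFunctions(Object** tables, int generations,
                                 Object* undefined, ObjectVisitor* v) {
      for (int i = 0; i < generations; i++) {
        if (tables[i] != undefined) {
          static_cast<Table*>(tables[i])->IterateElements(v);
        }
      }
    }

    int main() {
      Object undefined_sentinel;
      Table t;
      Object* tables[2] = { &undefined_sentinel, &t };
      IterateFunctions(tables, 2, &undefined_sentinel, NULL);
      assert(t.visited == 1);  // only the populated slot was visited
      return 0;
    }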
+
void CompilationSubCache::Iterate(ObjectVisitor* v) {
v->VisitPointers(&tables_[0], &tables_[generations_]);
}
@@ -528,15 +516,16 @@ void CompilationCache::Clear() {
}
}
-
-bool CompilationCache::HasFunction(SharedFunctionInfo* function_info) {
- return script.HasFunction(function_info);
+void CompilationCache::Iterate(ObjectVisitor* v) {
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Iterate(v);
+ }
}
-void CompilationCache::Iterate(ObjectVisitor* v) {
+void CompilationCache::IterateFunctions(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Iterate(v);
+ subcaches[i]->IterateFunctions(v);
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 583f04c48a..22ecff8358 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -79,11 +79,9 @@ class CompilationCache {
// Clear the cache - also used to initialize the cache at startup.
static void Clear();
-
- static bool HasFunction(SharedFunctionInfo* function_info);
-
// GC support.
static void Iterate(ObjectVisitor* v);
+ static void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 9f0162ea7e..bf6d41d854 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -33,7 +33,6 @@
#include "compiler.h"
#include "data-flow.h"
#include "debug.h"
-#include "flow-graph.h"
#include "full-codegen.h"
#include "liveedit.h"
#include "oprofile-agent.h"
@@ -92,27 +91,6 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
return Handle<Code>::null();
}
- if (function->scope()->num_parameters() > 0 ||
- function->scope()->num_stack_slots()) {
- AssignedVariablesAnalyzer ava(function);
- ava.Analyze();
- if (ava.HasStackOverflow()) {
- return Handle<Code>::null();
- }
- }
-
- if (FLAG_use_flow_graph) {
- FlowGraphBuilder builder;
- FlowGraph* graph = builder.Build(function);
- USE(graph);
-
-#ifdef DEBUG
- if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
- graph->PrintAsText(function->name());
- }
-#endif
- }
-
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not:
@@ -126,17 +104,13 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
-
- if (AlwaysFullCompiler()) {
+ bool use_full = FLAG_full_compiler && !function->contains_loops();
+ if (AlwaysFullCompiler() || (use_full && is_run_once)) {
return FullCodeGenerator::MakeCode(info);
- } else if (FLAG_full_compiler && is_run_once) {
- FullCodeGenSyntaxChecker checker;
- checker.Check(function);
- if (checker.has_supported_syntax()) {
- return FullCodeGenerator::MakeCode(info);
- }
}
+ AssignedVariablesAnalyzer ava(function);
+ if (!ava.Analyze()) return Handle<Code>::null();
return CodeGenerator::MakeCode(info);
}
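The selection logic above now reduces to a flag test plus a loop check, with the FullCodeGenSyntaxChecker pass gone. A boolean sketch of the new predicate; the parameter names are illustrative and mirror the flags and accessors used in the hunk:

    #include <cassert>

    // Models the new backend choice in MakeCode().
    static bool UseFullCodeGenerator(bool always_full_compiler,
                                     bool flag_full_compiler,
                                     bool contains_loops,
                                     bool is_run_once) {
      bool use_full = flag_full_compiler && !contains_loops;
      return always_full_compiler || (use_full && is_run_once);
    }

    int main() {
      assert(UseFullCodeGenerator(false, true, false, true));  // run-once, loop-free
      assert(!UseFullCodeGenerator(false, true, true, true));  // loops: classic backend
      return 0;
    }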
@@ -442,6 +416,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// object last we avoid this.
shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
shared->set_code(*code);
+ if (!info->closure().is_null()) {
+ info->closure()->set_code(*code);
+ }
// Set the expected number of properties for instances.
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
@@ -454,6 +431,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// Check the function has compiled code.
ASSERT(shared->is_compiled());
+ shared->set_code_age(0);
return true;
}
@@ -489,49 +467,19 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
return Handle<SharedFunctionInfo>::null();
}
- if (literal->scope()->num_parameters() > 0 ||
- literal->scope()->num_stack_slots()) {
- AssignedVariablesAnalyzer ava(literal);
- ava.Analyze();
- if (ava.HasStackOverflow()) {
- return Handle<SharedFunctionInfo>::null();
- }
- }
-
- if (FLAG_use_flow_graph) {
- FlowGraphBuilder builder;
- FlowGraph* graph = builder.Build(literal);
- USE(graph);
-
-#ifdef DEBUG
- if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
- graph->PrintAsText(literal->name());
- }
-#endif
- }
-
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
CompilationInfo info(literal, script, false);
bool is_run_once = literal->try_full_codegen();
- bool is_compiled = false;
-
- if (AlwaysFullCompiler()) {
+ bool use_full = FLAG_full_compiler && !literal->contains_loops();
+ if (AlwaysFullCompiler() || (use_full && is_run_once)) {
code = FullCodeGenerator::MakeCode(&info);
- is_compiled = true;
- } else if (FLAG_full_compiler && is_run_once) {
- FullCodeGenSyntaxChecker checker;
- checker.Check(literal);
- if (checker.has_supported_syntax()) {
- code = FullCodeGenerator::MakeCode(&info);
- is_compiled = true;
- }
- }
-
- if (!is_compiled) {
+ } else {
// We fall back to the classic V8 code generator.
+ AssignedVariablesAnalyzer ava(literal);
+ if (!ava.Analyze()) return Handle<SharedFunctionInfo>::null();
code = CodeGenerator::MakeCode(&info);
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index d1c98bd954..78dda6a64a 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -28,6 +28,9 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
+#include "heap.h"
+#include "objects.h"
+
namespace v8 {
namespace internal {
@@ -86,6 +89,7 @@ enum ContextLookupFlags {
V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
+ V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
@@ -211,6 +215,7 @@ class Context: public FixedArray {
CONFIGURE_GLOBAL_INDEX,
FUNCTION_CACHE_INDEX,
JSFUNCTION_RESULT_CACHES_INDEX,
+ NORMALIZED_MAP_CACHE_INDEX,
RUNTIME_CONTEXT_INDEX,
CALL_AS_FUNCTION_DELEGATE_INDEX,
CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
@@ -243,7 +248,8 @@ class Context: public FixedArray {
GlobalObject* global() {
Object* result = get(GLOBAL_INDEX);
- ASSERT(IsBootstrappingOrGlobalObject(result));
+ ASSERT(Heap::gc_state() != Heap::NOT_IN_GC ||
+ IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 1e2bb20c4f..90cdc773ea 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -733,11 +733,18 @@ double StringToInt(String* str, int radix) {
double StringToDouble(const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
-
return InternalStringToDouble(str, end, flags, empty_string_val);
}
+double StringToDouble(Vector<const char> str,
+ int flags,
+ double empty_string_val) {
+ const char* end = str.start() + str.length();
+ return InternalStringToDouble(str.start(), end, flags, empty_string_val);
+}
+
+
extern "C" char* dtoa(double d, int mode, int ndigits,
int* decpt, int* sign, char** rve);
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index c4ceea6b90..9e32a0cdb5 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -96,8 +96,12 @@ static inline uint32_t NumberToUint32(Object* number);
// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(const char* str, int flags, double empty_string_val = 0);
double StringToDouble(String* str, int flags, double empty_string_val = 0);
+double StringToDouble(Vector<const char> str,
+ int flags,
+ double empty_string_val = 0);
+// This version expects a zero-terminated character array.
+double StringToDouble(const char* str, int flags, double empty_string_val = 0);
// Converts a string into an integer.
double StringToInt(String* str, int radix);
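The new Vector<const char> overload lets callers convert an explicit-length slice, while the const char* overload keeps requiring the NUL terminator the comment above mentions. A rough sketch of the length-bounded interface, where CharVector is a hypothetical stand-in for the V8 Vector type:

    #include <cassert>
    #include <cstdlib>
    #include <string>

    // Minimal stand-in for v8::internal::Vector<const char>.
    struct CharVector {
      const char* start;
      int length;
    };

    // Length-bounded conversion; the buffer need not be NUL-terminated.
    static double StringToDouble(CharVector str) {
      return std::strtod(std::string(str.start, str.length).c_str(), NULL);
    }

    int main() {
      const char buffer[] = "3.5XYZ";  // digits followed by junk
      CharVector v = { buffer, 3 };    // covers "3.5" only
      assert(StringToDouble(v) == 3.5);
      return 0;
    }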
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 3e554ccebd..4248a64338 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -46,7 +46,7 @@ static const int kTickSamplesBufferChunksCount = 16;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: generator_(generator),
- running_(false),
+ running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
@@ -235,8 +235,19 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- if (rec->order == dequeue_order) {
- generator_->RecordTickSample(rec->sample);
+ // Make a local copy of the tick sample record to ensure that it won't
+ // be modified while we are processing it. This can happen because the
+ // sampler writes to the queue without any synchronization, so if the
+ // processor falls far behind, a record may be modified right under its
+ // feet.
+ TickSampleEventRecord record = *rec;
+ if (record.order == dequeue_order) {
+ // A paranoid check to make sure that we don't get a memory overrun in
+ // case frames_count holds a wild value.
+ if (record.sample.frames_count < 0
+ || record.sample.frames_count >= TickSample::kMaxFramesCount)
+ record.sample.frames_count = 0;
+ generator_->RecordTickSample(record.sample);
ticks_buffer_.FinishDequeue();
} else {
return true;
@@ -247,7 +258,6 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
- running_ = true;
while (running_) {
// Process ticks until we have any.
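The copy-then-clamp pattern from the ProcessTicks hunk can be isolated as follows; the record types are simplified stand-ins and kMaxFramesCount is an assumed placeholder value:

    #include <cassert>

    static const int kMaxFramesCount = 64;  // stand-in for TickSample::kMaxFramesCount

    struct TickSample { int frames_count; };
    struct TickSampleEventRecord { unsigned order; TickSample sample; };

    // Returns the clamped frame count, or -1 for a stale record.
    static int SanitizeTick(const TickSampleEventRecord* slot,
                            unsigned dequeue_order) {
      TickSampleEventRecord record = *slot;  // copy before the sampler can rewrite the slot
      if (record.order != dequeue_order) return -1;
      if (record.sample.frames_count < 0 ||
          record.sample.frames_count >= kMaxFramesCount) {
        record.sample.frames_count = 0;  // paranoid clamp against torn writes
      }
      return record.sample.frames_count;
    }

    int main() {
      TickSampleEventRecord wild = { 7u, { 1 << 20 } };
      assert(SanitizeTick(&wild, 7u) == 0);  // wild frame count clamped to zero
      return 0;
    }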
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7fd7925baa..5a1e63a763 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -486,7 +486,7 @@ void Shell::Initialize() {
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
- v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
+ v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
}
// Start the in-process debugger if requested.
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index 55d85825b2..d480c1bcf9 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -50,258 +50,13 @@ void BitVector::Print() {
#endif
-void AstLabeler::Label(CompilationInfo* info) {
- info_ = info;
- VisitStatements(info_->function()->body());
-}
-
-
-void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- }
-}
-
-
-void AstLabeler::VisitDeclarations(ZoneList<Declaration*>* decls) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void AstLabeler::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void AstLabeler::VisitEmptyStatement(EmptyStatement* stmt) {
- // Do nothing.
-}
-
-
-void AstLabeler::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitReturnStatement(ReturnStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitConditional(Conditional* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
- expr->set_num(next_number_++);
- Variable* var = expr->var();
- if (var->is_global() && !var->is_this()) {
- info_->set_has_globals(true);
- }
-}
-
-
-void AstLabeler::VisitLiteral(Literal* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitArrayLiteral(ArrayLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->IsPropertyName());
- VariableProxy* proxy = prop->obj()->AsVariableProxy();
- USE(proxy);
- ASSERT(proxy != NULL && proxy->var()->is_this());
- info()->set_has_this_properties(true);
-
- prop->obj()->set_num(AstNode::kNoNumber);
- prop->key()->set_num(AstNode::kNoNumber);
- Visit(expr->value());
- expr->set_num(next_number_++);
-}
-
-
-void AstLabeler::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitProperty(Property* expr) {
- ASSERT(expr->key()->IsPropertyName());
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- USE(proxy);
- ASSERT(proxy != NULL && proxy->var()->is_this());
- info()->set_has_this_properties(true);
-
- expr->obj()->set_num(AstNode::kNoNumber);
- expr->key()->set_num(AstNode::kNoNumber);
- expr->set_num(next_number_++);
-}
-
-
-void AstLabeler::VisitCall(Call* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCallRuntime(CallRuntime* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitUnaryOperation(UnaryOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCountOperation(CountOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
- expr->set_num(next_number_++);
-}
-
-
-void AstLabeler::VisitCompareOperation(CompareOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun)
- : fun_(fun),
- av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {}
-
-
-void AssignedVariablesAnalyzer::Analyze() {
- ASSERT(av_.length() > 0);
+bool AssignedVariablesAnalyzer::Analyze() {
+ Scope* scope = fun_->scope();
+ int variables = scope->num_parameters() + scope->num_stack_slots();
+ if (variables == 0) return true;
+ av_.ExpandTo(variables);
VisitStatements(fun_->body());
+ return !HasStackOverflow();
}
@@ -394,7 +149,7 @@ void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
!var->is_arguments() &&
var->mode() != Variable::CONST &&
(var->is_this() || !av_.Contains(BitIndex(var)))) {
- expr->AsVariableProxy()->set_is_trivial(true);
+ expr->AsVariableProxy()->MarkAsTrivial();
}
}
@@ -489,9 +244,7 @@ void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != NULL) Visit(stmt->init());
-
if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
-
if (stmt->next() != NULL) Visit(stmt->next());
// Process loop body. After visiting the loop body av_ contains
@@ -504,7 +257,6 @@ void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
if (var != NULL && !av_.Contains(BitIndex(var))) {
stmt->set_loop_variable(var);
}
-
av_.Union(saved_av);
}
@@ -712,13 +464,20 @@ void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(av_.IsEmpty());
+ MarkIfTrivial(expr->expression());
Visit(expr->expression());
}
+void AssignedVariablesAnalyzer::VisitIncrementOperation(
+ IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
ASSERT(av_.IsEmpty());
-
+ if (expr->is_prefix()) MarkIfTrivial(expr->expression());
Visit(expr->expression());
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
@@ -744,6 +503,13 @@ void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
}
+void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
+ ASSERT(av_.IsEmpty());
+ MarkIfTrivial(expr->expression());
+ Visit(expr->expression());
+}
+
+
void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
// Nothing to do.
ASSERT(av_.IsEmpty());
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 079da65b4d..540db162f6 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -42,12 +42,10 @@ class Node;
class BitVector: public ZoneObject {
public:
- explicit BitVector(int length)
- : length_(length),
- data_length_(SizeFor(length)),
- data_(Zone::NewArray<uint32_t>(data_length_)) {
- ASSERT(length > 0);
- Clear();
+ BitVector() : length_(0), data_length_(0), data_(NULL) { }
+
+ explicit BitVector(int length) {
+ ExpandTo(length);
}
BitVector(const BitVector& other)
@@ -57,8 +55,12 @@ class BitVector: public ZoneObject {
CopyFrom(other);
}
- static int SizeFor(int length) {
- return 1 + ((length - 1) / 32);
+ void ExpandTo(int length) {
+ ASSERT(length > 0);
+ length_ = length;
+ data_length_ = SizeFor(length);
+ data_ = Zone::NewArray<uint32_t>(data_length_);
+ Clear();
}
BitVector& operator=(const BitVector& rhs) {
@@ -137,6 +139,10 @@ class BitVector: public ZoneObject {
#endif
private:
+ static int SizeFor(int length) {
+ return 1 + ((length - 1) / 32);
+ }
+
int length_;
int data_length_;
uint32_t* data_;
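The default constructor plus ExpandTo pair is what lets AssignedVariablesAnalyzer hold its BitVector by value and size it only once the variable count is known. A heap-backed sketch of the same two-phase interface, with std::vector standing in for the Zone allocator:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    class LazyBitVector {
     public:
      LazyBitVector() : length_(0) {}  // empty until ExpandTo() is called

      void ExpandTo(int length) {
        assert(length > 0);
        length_ = length;
        data_.assign(1 + (length - 1) / 32, 0u);  // SizeFor(length) words, cleared
      }

      void Add(int i) { data_[i / 32] |= (1u << (i % 32)); }
      bool Contains(int i) const {
        return (data_[i / 32] & (1u << (i % 32))) != 0;
      }

     private:
      int length_;
      std::vector<std::uint32_t> data_;
    };

    int main() {
      LazyBitVector av;  // analyzer member: no allocation yet
      av.ExpandTo(40);   // parameters + stack slots now known
      av.Add(33);
      assert(av.Contains(33) && !av.Contains(2));
      return 0;
    }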
@@ -187,63 +193,13 @@ class WorkList BASE_EMBEDDED {
};
-struct ReachingDefinitionsData BASE_EMBEDDED {
- public:
- ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {}
-
- void Initialize(int definition_count) {
- rd_in_ = new BitVector(definition_count);
- kill_ = new BitVector(definition_count);
- gen_ = new BitVector(definition_count);
- }
-
- BitVector* rd_in() { return rd_in_; }
- BitVector* kill() { return kill_; }
- BitVector* gen() { return gen_; }
-
- private:
- BitVector* rd_in_;
- BitVector* kill_;
- BitVector* gen_;
-};
-
-
-// This class is used to number all expressions in the AST according to
-// their evaluation order (post-order left-to-right traversal).
-class AstLabeler: public AstVisitor {
- public:
- AstLabeler() : next_number_(0) {}
-
- void Label(CompilationInfo* info);
-
- private:
- CompilationInfo* info() { return info_; }
-
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Traversal number for labelling AST nodes.
- int next_number_;
-
- CompilationInfo* info_;
-
- DISALLOW_COPY_AND_ASSIGN(AstLabeler);
-};
-
-
// Computes the set of assigned variables and annotates variables proxies
// that are trivial sub-expressions and for-loops where the loop variable
// is guaranteed to be a smi.
class AssignedVariablesAnalyzer : public AstVisitor {
public:
- explicit AssignedVariablesAnalyzer(FunctionLiteral* fun);
-
- void Analyze();
+ explicit AssignedVariablesAnalyzer(FunctionLiteral* fun) : fun_(fun) { }
+ bool Analyze();
private:
Variable* FindSmiLoopVariable(ForStatement* stmt);
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 9c42a04f67..b101ea66b5 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -137,12 +137,18 @@ var DST_offset_cache = {
// Time interval where the cached offset is valid.
start: 0, end: -1,
// Size of next interval expansion.
- increment: 0
+ increment: 0,
+ initial_increment: 19 * msPerDay
};
// NOTE: The implementation relies on the fact that no time zones have
-// more than one daylight savings offset change per month.
+// more than one daylight savings offset change per 19 days.
+//
+// In Egypt in 2010 they decided to suspend DST during Ramadan. This
+// led to a short interval where DST is in effect from September 10 to
+// September 30.
+//
// If this function is called with NaN it returns NaN.
function DaylightSavingsOffset(t) {
// Load the cache object from the builtins object.
@@ -171,7 +177,7 @@ function DaylightSavingsOffset(t) {
// the offset in the cache, we grow the cached time interval
// and return the offset.
cache.end = new_end;
- cache.increment = msPerMonth;
+ cache.increment = cache.initial_increment;
return end_offset;
} else {
var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
@@ -182,7 +188,7 @@ function DaylightSavingsOffset(t) {
// the interval to reflect this and reset the increment.
cache.start = t;
cache.end = new_end;
- cache.increment = msPerMonth;
+ cache.increment = cache.initial_increment;
} else {
// The interval contains a DST offset change and the given time is
// before it. Adjust the increment to avoid a linear search for
@@ -207,7 +213,7 @@ function DaylightSavingsOffset(t) {
var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
cache.offset = offset;
cache.start = cache.end = t;
- cache.increment = msPerMonth;
+ cache.increment = cache.initial_increment;
return offset;
}
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index d999d9ca7d..cae9b08d5b 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -92,7 +92,7 @@ class DateParser : public AllStatic {
int ReadWord(uint32_t* prefix, int prefix_size) {
int len;
for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
- if (len < prefix_size) prefix[len] = GetAsciiAlphaLower();
+ if (len < prefix_size) prefix[len] = AsciiAlphaToLower(ch_);
}
for (int i = len; i < prefix_size; i++) prefix[i] = 0;
return len;
@@ -130,10 +130,6 @@ class DateParser : public AllStatic {
bool HasReadNumber() const { return has_read_number_; }
private:
- // If current character is in 'A'-'Z' or 'a'-'z', return its lower-case.
- // Else, return something outside of 'A'-'Z' and 'a'-'z'.
- uint32_t GetAsciiAlphaLower() const { return ch_ | 32; }
-
int index_;
Vector<Char> buffer_;
bool has_read_number_;
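The removed GetAsciiAlphaLower and the shared AsciiAlphaToLower it gives way to both rest on the same fact: ASCII upper- and lower-case letters differ only in bit 5. A one-line sketch of the trick, assuming ASCII input (the real helper lives elsewhere in the tree):

    #include <cassert>

    // 'A' | 0x20 == 'a': bit 5 distinguishes ASCII letter cases.
    static unsigned AsciiAlphaToLower(unsigned ch) { return ch | 0x20; }

    int main() {
      assert(AsciiAlphaToLower('Q') == 'q');
      assert(AsciiAlphaToLower('q') == 'q');
      return 0;
    }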
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index c13c8c9878..87780d350c 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -461,6 +461,8 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
KeyedStoreIC::ClearInlinedVersion(pc());
} else if (code->is_load_stub()) {
LoadIC::ClearInlinedVersion(pc());
+ } else if (code->is_store_stub()) {
+ StoreIC::ClearInlinedVersion(pc());
}
}
}
@@ -549,6 +551,7 @@ void Debug::ThreadInit() {
thread_local_.after_break_target_ = 0;
thread_local_.debugger_entry_ = NULL;
thread_local_.pending_interrupts_ = 0;
+ thread_local_.restarter_frame_function_pointer_ = NULL;
}
@@ -1004,17 +1007,18 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
if (CheckBreakPoint(o)) {
- break_points_hit->SetElement(break_points_hit_count++, *o);
+ SetElement(break_points_hit, break_points_hit_count++, o);
}
}
} else {
if (CheckBreakPoint(break_point_objects)) {
- break_points_hit->SetElement(break_points_hit_count++,
- *break_point_objects);
+ SetElement(break_points_hit,
+ break_points_hit_count++,
+ break_point_objects);
}
}
- // Return undefined if no break points where triggered.
+ // Return undefined if no break points were triggered.
if (break_points_hit_count == 0) {
return Factory::undefined_value();
}
@@ -1440,7 +1444,7 @@ bool Debug::IsDebugBreak(Address addr) {
// Check whether a code stub with the specified major key is a possible break
// point location when looking for source break locations.
bool Debug::IsSourceBreakStub(Code* code) {
- CodeStub::Major major_key = code->major_key();
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
return major_key == CodeStub::CallFunction;
}
@@ -1448,7 +1452,7 @@ bool Debug::IsSourceBreakStub(Code* code) {
// Check whether a code stub with the specified major key is a possible break
// location.
bool Debug::IsBreakStub(Code* code) {
- CodeStub::Major major_key = code->major_key();
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
return major_key == CodeStub::CallFunction ||
major_key == CodeStub::StackCheck;
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 98d1919423..8b3b29e636 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -29,7 +29,6 @@
#define V8_DEBUG_H_
#include "assembler.h"
-#include "code-stubs.h"
#include "debug-agent.h"
#include "execution.h"
#include "factory.h"
@@ -332,8 +331,7 @@ class Debug {
k_after_break_target_address,
k_debug_break_return_address,
k_debug_break_slot_address,
- k_restarter_frame_function_pointer,
- k_register_address
+ k_restarter_frame_function_pointer
};
// Support for setting the address to jump to when returning from break point.
@@ -953,10 +951,7 @@ class DisableBreak BASE_EMBEDDED {
// code.
class Debug_Address {
public:
- Debug_Address(Debug::AddressId id, int reg = 0)
- : id_(id), reg_(reg) {
- ASSERT(reg == 0 || id == Debug::k_register_address);
- }
+ explicit Debug_Address(Debug::AddressId id) : id_(id) { }
static Debug_Address AfterBreakTarget() {
return Debug_Address(Debug::k_after_break_target_address);
@@ -970,10 +965,6 @@ class Debug_Address {
return Debug_Address(Debug::k_restarter_frame_function_pointer);
}
- static Debug_Address Register(int reg) {
- return Debug_Address(Debug::k_register_address, reg);
- }
-
Address address() const {
switch (id_) {
case Debug::k_after_break_target_address:
@@ -985,8 +976,6 @@ class Debug_Address {
case Debug::k_restarter_frame_function_pointer:
return reinterpret_cast<Address>(
Debug::restarter_frame_function_pointer_address());
- case Debug::k_register_address:
- return reinterpret_cast<Address>(Debug::register_address(reg_));
default:
UNREACHABLE();
return NULL;
@@ -994,7 +983,6 @@ class Debug_Address {
}
private:
Debug::AddressId id_;
- int reg_;
};
// The optional thread that Debug Agent may use to temporary call V8 to process
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 19cb6af728..e79421fe2f 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -258,11 +258,12 @@ static int DecodeIt(FILE* f,
// Get the STUB key and extract major and minor key.
uint32_t key = Smi::cast(obj)->value();
uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+ ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
out.AddFormatted(" %s, %s, ",
Code::Kind2String(kind),
- CodeStub::MajorName(code->major_key(), false));
- switch (code->major_key()) {
+ CodeStub::MajorName(major_key, false));
+ switch (major_key) {
case CodeStub::CallFunction:
out.AddFormatted("argc = %d", minor_key);
break;
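
Both the debugger and the disassembler now obtain the major key through CodeStub::GetMajorKey(code) instead of a method on Code. The keys themselves are packed into one small integer, as the MajorKeyFromKey/MinorKeyFromKey calls above suggest; a sketch of such packing, with illustrative field widths:

    #include <cassert>
    #include <cstdint>

    // Illustrative layout: the low bits hold the major key, the rest the
    // minor key. (The real bit widths are an assumption here.)
    constexpr int kMajorBits = 6;
    constexpr uint32_t kMajorMask = (1u << kMajorBits) - 1;

    constexpr uint32_t MakeKey(uint32_t major, uint32_t minor) {
      return (minor << kMajorBits) | major;
    }
    constexpr uint32_t MajorKeyFromKey(uint32_t key) { return key & kMajorMask; }
    constexpr uint32_t MinorKeyFromKey(uint32_t key) { return key >> kMajorBits; }

    int main() {
      uint32_t key = MakeKey(/*major=*/3, /*minor=*/42);
      assert(MajorKeyFromKey(key) == 3);
      assert(MinorKeyFromKey(key) == 42);
      return 0;
    }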
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index a6b15ccb45..54216784a7 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -710,7 +710,7 @@ class SimpleStringResource : public Base {
: data_(data),
length_(length) {}
- virtual ~SimpleStringResource() { delete data_; }
+ virtual ~SimpleStringResource() { delete[] data_; }
virtual const Char* data() const { return data_; }
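
The execution.cc fix pairs delete[] with the array allocation behind data_; releasing a new[] buffer with plain delete is undefined behavior. A minimal version of the corrected pattern:

    #include <cstddef>
    #include <cstring>

    struct SimpleStringResource {
      explicit SimpleStringResource(const char* s) {
        length_ = std::strlen(s);
        data_ = new char[length_ + 1];             // Array new...
        std::memcpy(data_, s, length_ + 1);
      }
      ~SimpleStringResource() { delete[] data_; }  // ...needs array delete.
      const char* data() const { return data_; }
     private:
      char* data_;
      std::size_t length_;
    };

    int main() {
      SimpleStringResource r("hello");
      return r.data()[0] == 'h' ? 0 : 1;
    }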
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index a143bcd6f5..a63088d5a5 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -283,7 +283,7 @@ DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent")
DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-DEFINE_string(map_counters, false, "Map counters to a file")
+DEFINE_string(map_counters, NULL, "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".")
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index a8eca95c2c..f9cbde0bf7 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -27,8 +27,6 @@
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
-#include "checks.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flow-graph.cc b/deps/v8/src/flow-graph.cc
deleted file mode 100644
index 02a2cd9cfe..0000000000
--- a/deps/v8/src/flow-graph.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "flow-graph.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-void BasicBlock::BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
- ZoneList<BasicBlock*>* postorder,
- bool mark) {
- if (mark_ == mark) return;
- mark_ = mark;
- preorder->Add(this);
- if (right_successor_ != NULL) {
- right_successor_->BuildTraversalOrder(preorder, postorder, mark);
- }
- if (left_successor_ != NULL) {
- left_successor_->BuildTraversalOrder(preorder, postorder, mark);
- }
- postorder->Add(this);
-}
-
-
-FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) {
- // Create new entry and exit nodes. These will not change during
- // construction.
- entry_ = new BasicBlock(NULL);
- exit_ = new BasicBlock(NULL);
- // Begin accumulating instructions in the entry block.
- current_ = entry_;
-
- VisitDeclarations(lit->scope()->declarations());
- VisitStatements(lit->body());
- // In the event of stack overflow or failure to handle a syntactic
- // construct, return an invalid flow graph.
- if (HasStackOverflow()) return new FlowGraph(NULL, NULL);
-
- // If current is not the exit, add a link to the exit.
- if (current_ != exit_) {
- // If current already has a successor (i.e., will be a branch node) and
- // if the exit already has a predecessor, insert an empty block to
- // maintain edge split form.
- if (current_->HasSuccessor() && exit_->HasPredecessor()) {
- current_ = new BasicBlock(current_);
- }
- Literal* undefined = new Literal(Factory::undefined_value());
- current_->AddInstruction(new ReturnStatement(undefined));
- exit_->AddPredecessor(current_);
- }
-
- FlowGraph* graph = new FlowGraph(entry_, exit_);
- bool mark = !entry_->GetMark();
- entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark);
-
-#ifdef DEBUG
- // Number the nodes in reverse postorder.
- int n = 0;
- for (int i = graph->postorder()->length() - 1; i >= 0; --i) {
- graph->postorder()->at(i)->set_number(n++);
- }
-#endif
-
- return graph;
-}
-
-
-void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
- Variable* var = decl->proxy()->AsVariable();
- Slot* slot = var->slot();
- // We allow only declarations that do not require code generation.
- // The following all require code generation: global variables and
- // functions, variables with slot type LOOKUP, declarations with
- // mode CONST, and functions.
-
- if (var->is_global() ||
- (slot != NULL && slot->type() == Slot::LOOKUP) ||
- decl->mode() == Variable::CONST ||
- decl->fun() != NULL) {
- // Here and in the rest of the flow graph builder we indicate an
- // unsupported syntactic construct by setting the stack overflow
- // flag on the visitor. This causes bailout of the visitor.
- SetStackOverflow();
- }
-}
-
-
-void FlowGraphBuilder::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
- // Build a diamond in the flow graph. First accumulate the instructions
- // of the test in the current basic block.
- Visit(stmt->condition());
-
- // Remember the branch node and accumulate the true branch as its left
- // successor. This relies on the successors being added left to right.
- BasicBlock* branch = current_;
- current_ = new BasicBlock(branch);
- Visit(stmt->then_statement());
-
- // Construct a join node and then accumulate the false branch in a fresh
- // successor of the branch node.
- BasicBlock* join = new BasicBlock(current_);
- current_ = new BasicBlock(branch);
- Visit(stmt->else_statement());
- join->AddPredecessor(current_);
-
- current_ = join;
-}
-
-
-void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
- // Build a loop in the flow graph. First accumulate the instructions of
- // the initializer in the current basic block.
- if (stmt->init() != NULL) Visit(stmt->init());
-
- // Create a new basic block for the test. This will be the join node.
- BasicBlock* join = new BasicBlock(current_);
- current_ = join;
- if (stmt->cond() != NULL) Visit(stmt->cond());
-
- // The current node is the branch node. Create a new basic block to begin
- // the body.
- BasicBlock* branch = current_;
- current_ = new BasicBlock(branch);
- Visit(stmt->body());
- if (stmt->next() != NULL) Visit(stmt->next());
-
- // Add the backward edge from the end of the body and continue with the
- // false arm of the branch.
- join->AddPredecessor(current_);
- current_ = new BasicBlock(branch);
-}
-
-
-void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitConditional(Conditional* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSlot(Slot* expr) {
- // Slots do not appear in the AST.
- UNREACHABLE();
-}
-
-
-void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitLiteral(Literal* expr) {
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
- // There are three basic kinds of assignment: variable assignments,
- // property assignments, and invalid left-hand sides (which are translated
- // to "throw ReferenceError" by the parser).
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (expr->is_compound() && !expr->target()->IsTrivial()) {
- Visit(expr->target());
- }
- if (!expr->value()->IsTrivial()) Visit(expr->value());
- current_->AddInstruction(expr);
-
- } else if (prop != NULL) {
- if (!prop->obj()->IsTrivial()) Visit(prop->obj());
- if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) {
- Visit(prop->key());
- }
- if (!expr->value()->IsTrivial()) Visit(expr->value());
- current_->AddInstruction(expr);
-
- } else {
- Visit(expr->target());
- }
-}
-
-
-void FlowGraphBuilder::VisitThrow(Throw* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitProperty(Property* expr) {
- if (!expr->obj()->IsTrivial()) Visit(expr->obj());
- if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) {
- Visit(expr->key());
- }
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitCall(Call* expr) {
- Visit(expr->expression());
- VisitExpressions(expr->arguments());
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::NOT:
- case Token::BIT_NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- case Token::VOID:
- SetStackOverflow();
- break;
-
- case Token::ADD:
- case Token::SUB:
- Visit(expr->expression());
- current_->AddInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
- Visit(expr->expression());
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- SetStackOverflow();
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- if (!expr->left()->IsTrivial()) Visit(expr->left());
- if (!expr->right()->IsTrivial()) Visit(expr->right());
- current_->AddInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- switch (expr->op()) {
- case Token::EQ:
- case Token::NE:
- case Token::EQ_STRICT:
- case Token::NE_STRICT:
- case Token::INSTANCEOF:
- case Token::IN:
- SetStackOverflow();
- break;
-
- case Token::LT:
- case Token::GT:
- case Token::LTE:
- case Token::GTE:
- if (!expr->left()->IsTrivial()) Visit(expr->left());
- if (!expr->right()->IsTrivial()) Visit(expr->right());
- current_->AddInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- SetStackOverflow();
-}
-
-
-#ifdef DEBUG
-
-// Print a textual representation of an instruction in a flow graph.
-class InstructionPrinter: public AstVisitor {
- public:
- InstructionPrinter() {}
-
- private:
- // Overridden from the base class.
- virtual void VisitExpressions(ZoneList<Expression*>* exprs);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(InstructionPrinter);
-};
-
-
-static void PrintSubexpression(Expression* expr) {
- if (!expr->IsTrivial()) {
- PrintF("@%d", expr->num());
- } else if (expr->AsLiteral() != NULL) {
- expr->AsLiteral()->handle()->Print();
- } else if (expr->AsVariableProxy() != NULL) {
- PrintF("%s", *expr->AsVariableProxy()->name()->ToCString());
- } else {
- UNREACHABLE();
- }
-}
-
-
-void InstructionPrinter::VisitExpressions(ZoneList<Expression*>* exprs) {
- for (int i = 0; i < exprs->length(); ++i) {
- if (i != 0) PrintF(", ");
- PrintF("@%d", exprs->at(i)->num());
- }
-}
-
-
-// We only define printing functions for the node types that can occur as
-// instructions in a flow graph. The rest are unreachable.
-void InstructionPrinter::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitBlock(Block* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
- PrintF("return ");
- PrintSubexpression(stmt->expression());
-}
-
-
-void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitConditional(Conditional* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
- Variable* var = expr->AsVariable();
- if (var != NULL) {
- PrintF("%s", *var->name()->ToCString());
- } else {
- ASSERT(expr->AsProperty() != NULL);
- Visit(expr->AsProperty());
- }
-}
-
-
-void InstructionPrinter::VisitLiteral(Literal* expr) {
- expr->handle()->Print();
-}
-
-
-void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
-
- // Print the left-hand side.
- Visit(expr->target());
- if (var == NULL && prop == NULL) return; // Throw reference error.
- PrintF(" = ");
- // For compound assignments, print the left-hand side again and the
- // corresponding binary operator.
- if (expr->is_compound()) {
- PrintSubexpression(expr->target());
- PrintF(" %s ", Token::String(expr->binary_op()));
- }
-
- // Print the right-hand side.
- PrintSubexpression(expr->value());
-}
-
-
-void InstructionPrinter::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitProperty(Property* expr) {
- PrintSubexpression(expr->obj());
- if (expr->key()->IsPropertyName()) {
- PrintF(".");
- ASSERT(expr->key()->AsLiteral() != NULL);
- expr->key()->AsLiteral()->handle()->Print();
- } else {
- PrintF("[");
- PrintSubexpression(expr->key());
- PrintF("]");
- }
-}
-
-
-void InstructionPrinter::VisitCall(Call* expr) {
- PrintF("@%d(", expr->expression()->num());
- VisitExpressions(expr->arguments());
- PrintF(")");
-}
-
-
-void InstructionPrinter::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
- PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
-}
-
-
-void InstructionPrinter::VisitCountOperation(CountOperation* expr) {
- if (expr->is_prefix()) {
- PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
- } else {
- PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
- }
-}
-
-
-void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
- PrintSubexpression(expr->left());
- PrintF(" %s ", Token::String(expr->op()));
- PrintSubexpression(expr->right());
-}
-
-
-void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
- PrintSubexpression(expr->left());
- PrintF(" %s ", Token::String(expr->op()));
- PrintSubexpression(expr->right());
-}
-
-
-void InstructionPrinter::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
-
-
-int BasicBlock::PrintAsText(int instruction_number) {
- // Print a label for all blocks except the entry.
- if (HasPredecessor()) {
- PrintF("L%d:", number());
- }
-
- // Number and print the instructions. Since AST child nodes are visited
- // before their parents, the parent nodes can refer to them by number.
- InstructionPrinter printer;
- for (int i = 0; i < instructions_.length(); ++i) {
- PrintF("\n%d ", instruction_number);
- instructions_[i]->set_num(instruction_number++);
- instructions_[i]->Accept(&printer);
- }
-
- // If this is the exit, print "exit". If there is a single successor,
- // print "goto" successor on a separate line. If there are two
- // successors, print "goto" successor on the same line as the last
- // instruction in the block. There is a blank line between blocks (and
- // after the last one).
- if (left_successor_ == NULL) {
- PrintF("\nexit\n\n");
- } else if (right_successor_ == NULL) {
- PrintF("\ngoto L%d\n\n", left_successor_->number());
- } else {
- PrintF(", goto (L%d, L%d)\n\n",
- left_successor_->number(),
- right_successor_->number());
- }
-
- return instruction_number;
-}
-
-
-void FlowGraph::PrintAsText(Handle<String> name) {
- PrintF("\n==== name = \"%s\" ====\n", *name->ToCString());
- // Print nodes in reverse postorder. Note that AST node numbers are used
- // during printing of instructions and thus their current values are
- // destroyed.
- int number = 0;
- for (int i = postorder_.length() - 1; i >= 0; --i) {
- number = postorder_[i]->PrintAsText(number);
- }
-}
-
-#endif // DEBUG
-
-
-} } // namespace v8::internal
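
The deleted BuildTraversalOrder is worth noting for its mark-flipping trick: before a walk every node carries the same mark, visiting flips it, and the next walk simply passes the opposite value, so marks never need a clearing pass. A standalone sketch of the idea:

    #include <cstdio>
    #include <vector>

    struct Node {
      bool mark;
      Node* left;   // Successors, as in the flow graph's basic blocks.
      Node* right;
    };

    // Depth-first walk recording pre- and postorder. 'mark' is the value
    // meaning "visited" for this traversal; the next traversal passes the
    // opposite value instead of clearing anything.
    void BuildTraversalOrder(Node* n, std::vector<Node*>* pre,
                             std::vector<Node*>* post, bool mark) {
      if (n == nullptr || n->mark == mark) return;
      n->mark = mark;
      pre->push_back(n);
      BuildTraversalOrder(n->right, pre, post, mark);  // Right first, as above.
      BuildTraversalOrder(n->left, pre, post, mark);
      post->push_back(n);
    }

    int main() {
      Node exit_block = {false, nullptr, nullptr};
      Node then_block = {false, &exit_block, nullptr};
      Node else_block = {false, &exit_block, nullptr};
      Node entry = {false, &then_block, &else_block};  // A diamond.
      std::vector<Node*> pre, post;
      BuildTraversalOrder(&entry, &pre, &post, !entry.mark);
      std::printf("%zu %zu\n", pre.size(), post.size());  // 4 4
      return 0;
    }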
diff --git a/deps/v8/src/flow-graph.h b/deps/v8/src/flow-graph.h
deleted file mode 100644
index f6af8410ae..0000000000
--- a/deps/v8/src/flow-graph.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FLOW_GRAPH_H_
-#define V8_FLOW_GRAPH_H_
-
-#include "v8.h"
-
-#include "data-flow.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// The nodes of a flow graph are basic blocks. Basic blocks consist of
-// instructions represented as pointers to AST nodes in the order that they
-// would be visited by the code generator. A block can have arbitrarily many
-// (even zero) predecessors and up to two successors. Blocks with multiple
-// predecessors are "join nodes" and blocks with multiple successors are
-// "branch nodes". A block can be both a branch and a join node.
-//
-// Flow graphs are in edge split form: a branch node is never the
-// predecessor of a merge node. Empty basic blocks are inserted to maintain
-// edge split form.
-class BasicBlock: public ZoneObject {
- public:
- // Construct a basic block with a given predecessor. NULL indicates no
- // predecessor or that the predecessor will be set later.
- explicit BasicBlock(BasicBlock* predecessor)
- : predecessors_(2),
- instructions_(8),
- left_successor_(NULL),
- right_successor_(NULL),
- mark_(false) {
- if (predecessor != NULL) AddPredecessor(predecessor);
- }
-
- bool HasPredecessor() { return !predecessors_.is_empty(); }
- bool HasSuccessor() { return left_successor_ != NULL; }
-
- // Add a given basic block as a predecessor of this block. This function
- // also adds this block as a successor of the given block.
- void AddPredecessor(BasicBlock* predecessor) {
- ASSERT(predecessor != NULL);
- predecessors_.Add(predecessor);
- predecessor->AddSuccessor(this);
- }
-
- // Add an instruction to the end of this block. The block must be "open"
- // by not having a successor yet.
- void AddInstruction(AstNode* instruction) {
- ASSERT(!HasSuccessor() && instruction != NULL);
- instructions_.Add(instruction);
- }
-
- // Perform a depth-first traversal of graph rooted at this node,
- // accumulating pre- and postorder traversal orders. Visited nodes are
- // marked with mark.
- void BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
- ZoneList<BasicBlock*>* postorder,
- bool mark);
- bool GetMark() { return mark_; }
-
-#ifdef DEBUG
- // In debug mode, blocks are numbered in reverse postorder to help with
- // printing.
- int number() { return number_; }
- void set_number(int n) { number_ = n; }
-
- // Print a basic block, given the number of the first instruction.
- // Returns the next number after the number of the last instruction.
- int PrintAsText(int instruction_number);
-#endif
-
- private:
- // Add a given basic block as successor to this block. This function does
- // not add this block as a predecessor of the given block so as to avoid
- // circularity.
- void AddSuccessor(BasicBlock* successor) {
- ASSERT(right_successor_ == NULL && successor != NULL);
- if (HasSuccessor()) {
- right_successor_ = successor;
- } else {
- left_successor_ = successor;
- }
- }
-
- ZoneList<BasicBlock*> predecessors_;
- ZoneList<AstNode*> instructions_;
- BasicBlock* left_successor_;
- BasicBlock* right_successor_;
-
- // Support for graph traversal. Before traversal, all nodes in the graph
- // have the same mark (true or false). Traversal marks already-visited
- // nodes with the opposite mark. After traversal, all nodes again have
- // the same mark. Traversal of the same graph is not reentrant.
- bool mark_;
-
-#ifdef DEBUG
- int number_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(BasicBlock);
-};
-
-
-// A flow graph has distinguished entry and exit blocks. The entry block is
-// the only one with no predecessors and the exit block is the only one with
-// no successors.
-class FlowGraph: public ZoneObject {
- public:
- FlowGraph(BasicBlock* entry, BasicBlock* exit)
- : entry_(entry), exit_(exit), preorder_(8), postorder_(8) {
- }
-
- ZoneList<BasicBlock*>* preorder() { return &preorder_; }
- ZoneList<BasicBlock*>* postorder() { return &postorder_; }
-
-#ifdef DEBUG
- void PrintAsText(Handle<String> name);
-#endif
-
- private:
- BasicBlock* entry_;
- BasicBlock* exit_;
- ZoneList<BasicBlock*> preorder_;
- ZoneList<BasicBlock*> postorder_;
-};
-
-
-// The flow graph builder walks the AST adding reachable AST nodes to the
-// flow graph as instructions. It remembers the entry and exit nodes of the
-// graph, and keeps a pointer to the current block being constructed.
-class FlowGraphBuilder: public AstVisitor {
- public:
- FlowGraphBuilder() {}
-
- FlowGraph* Build(FunctionLiteral* lit);
-
- private:
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- BasicBlock* entry_;
- BasicBlock* exit_;
- BasicBlock* current_;
-
- DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FLOW_GRAPH_H_
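
The deleted header also documents edge-split form: a branch node (two successors) must never directly precede a join node (two predecessors), and empty blocks are spliced in to keep that invariant, as FlowGraphBuilder::Build did when linking to the exit. A compact model of that splice (Block is a stand-in for BasicBlock):

    #include <vector>

    struct Block {
      Block() : left(nullptr), right(nullptr) {}
      std::vector<Block*> preds;
      Block* left;
      Block* right;
      bool HasSuccessor() const { return left != nullptr; }
      bool HasPredecessor() const { return !preds.empty(); }
      void AddSuccessor(Block* s) { (HasSuccessor() ? right : left) = s; }
      // Mirrors BasicBlock::AddPredecessor: also registers the back edge.
      void AddPredecessor(Block* p) { preds.push_back(p); p->AddSuccessor(this); }
    };

    // Connect 'current' to 'exit' while preserving edge-split form: an
    // edge from a branch node to a join node gets an empty block spliced
    // in between (leaked here for brevity of the sketch).
    Block* LinkToExit(Block* current, Block* exit) {
      if (current->HasSuccessor() && exit->HasPredecessor()) {
        Block* empty = new Block();
        empty->AddPredecessor(current);
        current = empty;
      }
      exit->AddPredecessor(current);
      return current;
    }

    int main() {
      Block branch, other, exit_block;
      branch.AddSuccessor(&other);        // branch already has a successor.
      exit_block.preds.push_back(&other); // exit already has a predecessor.
      LinkToExit(&branch, &exit_block);
      return exit_block.preds.size() == 2 ? 0 : 1;  // Splice happened.
    }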
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 7221851325..78bb646c78 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -64,9 +64,8 @@ inline bool StackHandler::includes(Address address) const {
}
-inline void StackHandler::Iterate(ObjectVisitor* v) const {
- // Stack handlers do not contain any pointers that need to be
- // traversed.
+inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
+ StackFrame::IteratePc(v, pc_address(), holder);
}
@@ -81,15 +80,9 @@ inline StackHandler::State StackHandler::state() const {
}
-inline Address StackHandler::pc() const {
+inline Address* StackHandler::pc_address() const {
const int offset = StackHandlerConstants::kPCOffset;
- return Memory::Address_at(address() + offset);
-}
-
-
-inline void StackHandler::set_pc(Address value) {
- const int offset = StackHandlerConstants::kPCOffset;
- Memory::Address_at(address() + offset) = value;
+ return reinterpret_cast<Address*>(address() + offset);
}
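
Replacing pc()/set_pc() with a single pc_address() accessor gives the garbage collector a slot it can rewrite in place. The frames.cc hunk below uses it in StackFrame::IteratePc: record the pc's offset inside the holding Code object, let the visitor possibly move the object, then rebase the pc. A reduced model of that rebase (the visitor is simulated by a pre-updated code slot):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uint8_t* Address;

    struct Code {
      Address instruction_start_;
      std::size_t size_;
      Address instruction_start() const { return instruction_start_; }
      bool contains(Address pc) const {
        return pc >= instruction_start_ && pc < instruction_start_ + size_;
      }
    };

    // Reduced IteratePc: if the "GC" relocated the holding Code object,
    // recompute the saved pc from its offset and write it back through
    // the slot. This is why the handler exposes pc_address().
    void IteratePc(Code** code_slot, Address* pc_address, Code* holder) {
      Address pc = *pc_address;
      assert(holder->contains(pc));
      std::size_t pc_offset = static_cast<std::size_t>(pc - holder->instruction_start());
      Code* relocated = *code_slot;  // Stand-in for v->VisitPointer(&code).
      if (relocated != holder) {
        *pc_address = relocated->instruction_start() + pc_offset;
      }
    }

    int main() {
      uint8_t old_space[16], new_space[16];
      Code before = {old_space, 16};
      Code after = {new_space, 16};
      Address pc = old_space + 4;
      Code* slot = &after;           // "GC" already updated the code slot.
      IteratePc(&slot, &pc, &before);
      assert(pc == new_space + 4);   // The pc followed the code object.
      return 0;
    }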
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index bdd5100ed8..76a441b64d 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -36,6 +36,11 @@
namespace v8 {
namespace internal {
+PcToCodeCache::PcToCodeCacheEntry
+ PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
+
+int SafeStackFrameIterator::active_count_ = 0;
+
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED {
@@ -88,7 +93,6 @@ StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
if (use_top || fp != NULL) {
Reset();
}
- JavaScriptFrame_.DisableHeapAccess();
}
#undef INITIALIZE_SINGLETON
@@ -201,7 +205,7 @@ bool StackTraceFrameIterator::IsValidFrame() {
SafeStackFrameIterator::SafeStackFrameIterator(
Address fp, Address sp, Address low_bound, Address high_bound) :
- low_bound_(low_bound), high_bound_(high_bound),
+ maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
is_valid_top_(
IsWithinBounds(low_bound, high_bound,
Top::c_entry_fp(Top::GetCurrentThread())) &&
@@ -302,69 +306,42 @@ void SafeStackTraceFrameIterator::Advance() {
#endif
-// -------------------------------------------------------------------------
-
-
-void StackHandler::Cook(Code* code) {
- ASSERT(code->contains(pc()));
- set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-
-void StackHandler::Uncook(Code* code) {
- set_pc(code->instruction_start() + OffsetFrom(pc()));
- ASSERT(code->contains(pc()));
-}
-
-
-// -------------------------------------------------------------------------
-
-
bool StackFrame::HasHandler() const {
StackHandlerIterator it(this, top_handler());
return !it.done();
}
-
-void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
- ASSERT(!thread->stack_is_cooked());
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Cook();
+void StackFrame::IteratePc(ObjectVisitor* v,
+ Address* pc_address,
+ Code* holder) {
+ Address pc = *pc_address;
+ ASSERT(holder->contains(pc));
+ unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
+ Object* code = holder;
+ v->VisitPointer(&code);
+ if (code != holder) {
+ holder = reinterpret_cast<Code*>(code);
+ pc = holder->instruction_start() + pc_offset;
+ *pc_address = pc;
}
- thread->set_stack_is_cooked(true);
}
-void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
- ASSERT(thread->stack_is_cooked());
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Uncook();
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
}
- thread->set_stack_is_cooked(false);
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
-void StackFrame::Cook() {
- Code* code = this->code();
- ASSERT(code->IsCode());
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- it.handler()->Cook(code);
- }
- ASSERT(code->contains(pc()));
- set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-
-void StackFrame::Uncook() {
- Code* code = this->code();
- ASSERT(code->IsCode());
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- it.handler()->Uncook(code);
- }
- set_pc(code->instruction_start() + OffsetFrom(pc()));
- ASSERT(code->contains(pc()));
-}
-
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
@@ -372,8 +349,8 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
}
-Code* EntryFrame::code() const {
- return Heap::js_entry_code();
+Code* EntryFrame::unchecked_code() const {
+ return Heap::raw_unchecked_js_entry_code();
}
@@ -395,8 +372,8 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
}
-Code* EntryConstructFrame::code() const {
- return Heap::js_construct_entry_code();
+Code* EntryConstructFrame::unchecked_code() const {
+ return Heap::raw_unchecked_js_construct_entry_code();
}
@@ -406,8 +383,8 @@ Object*& ExitFrame::code_slot() const {
}
-Code* ExitFrame::code() const {
- return Code::cast(code_slot());
+Code* ExitFrame::unchecked_code() const {
+ return reinterpret_cast<Code*>(code_slot());
}
@@ -425,6 +402,14 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
}
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
+ IteratePc(v, pc_address(), code());
+ v->VisitPointer(&code_slot());
+}
+
+
Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPDisplacement;
}
@@ -493,22 +478,65 @@ bool JavaScriptFrame::IsConstructor() const {
}
-Code* JavaScriptFrame::code() const {
+Code* JavaScriptFrame::unchecked_code() const {
JSFunction* function = JSFunction::cast(this->function());
- return function->shared()->code();
+ return function->unchecked_code();
}
-Code* ArgumentsAdaptorFrame::code() const {
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC ||
+ SafeStackFrameIterator::is_active()) {
+ // If the we are currently iterating the safe stack the
+ // arguments for frames are traversed as if they were
+ // expression stack elements of the calling frame. The reason for
+ // this rather strange decision is that we cannot access the
+ // function during mark-compact GCs when objects may have been marked.
+ // In fact accessing heap objects (like function->shared() below)
+ // at all during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+Code* ArgumentsAdaptorFrame::unchecked_code() const {
return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
}
-Code* InternalFrame::code() const {
+Code* InternalFrame::unchecked_code() const {
const int offset = InternalFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp() + offset);
ASSERT(code != NULL);
- return Code::cast(code);
+ return reinterpret_cast<Code*>(code);
}
@@ -694,13 +722,14 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
ASSERT(!it.done());
StackHandler* handler = it.handler();
ASSERT(handler->is_entry());
- handler->Iterate(v);
- // Make sure that there's the entry frame does not contain more than
- // one stack handler.
+ handler->Iterate(v, code());
#ifdef DEBUG
+ // Make sure that the entry frame does not contain more than one
+ // stack handler.
it.Advance();
ASSERT(it.done());
#endif
+ IteratePc(v, pc_address(), code());
}
@@ -717,7 +746,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
v->VisitPointers(base, reinterpret_cast<Object**>(address));
base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
// Traverse the pointers in the handler itself.
- handler->Iterate(v);
+ handler->Iterate(v, code());
}
v->VisitPointers(base, limit);
}
@@ -725,6 +754,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
+ IteratePc(v, pc_address(), code());
// Traverse callee-saved registers, receiver, and parameters.
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
@@ -739,6 +769,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
+ IteratePc(v, pc_address(), code());
}
@@ -760,6 +791,56 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+ Code* code = reinterpret_cast<Code*>(object);
+ ASSERT(code != NULL && code->contains(pc));
+ return code;
+}
+
+
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+ // Check if the pc points into a large object chunk.
+ LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
+ if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+
+ // Iterate through the 8K page until we reach the end or find an
+ // object starting after the pc.
+ Page* page = Page::FromAddress(pc);
+ HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
+ HeapObject* previous = NULL;
+ while (true) {
+ HeapObject* next = iterator.next();
+ if (next == NULL || next->address() >= pc) {
+ return GcSafeCastToCode(previous, pc);
+ }
+ previous = next;
+ }
+}
+
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+ Counters::pc_to_code.Increment();
+ ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+ uint32_t hash = ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
+ uint32_t index = hash & (kPcToCodeCacheSize - 1);
+ PcToCodeCacheEntry* entry = cache(index);
+ if (entry->pc == pc) {
+ Counters::pc_to_code_cached.Increment();
+ ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+ } else {
+ // Because this code may be interrupted by a profiling signal that
+ // also queries the cache, we cannot update pc before the code has
+ // been set. Otherwise, we risk trying to use a cache entry before
+ // the code has been computed.
+ entry->code = GcSafeFindCodeForPc(pc);
+ entry->pc = pc;
+ }
+ return entry;
+}
+
+
+// -------------------------------------------------------------------------
+
int NumRegs(RegList reglist) {
int n = 0;
while (reglist != 0) {
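
PcToCodeCache, added above, replaces the cooked-frame machinery: instead of rewriting saved pcs around GC, frames look their Code object up from the pc through a small direct-mapped cache. The sketch below mirrors GetCacheEntry, including the update-ordering point from the comment (the hash mixing and the slow path are illustrative stand-ins):

    #include <cstdint>

    typedef const void* Address;

    struct Entry { Address pc; int code; };   // 'code' stands in for Code*.

    const int kCacheSize = 256;               // Must stay a power of two.
    static Entry cache[kCacheSize];

    // Toy slow path standing in for the GC-safe heap walk.
    static int SlowFindCodeForPc(Address pc) {
      return static_cast<int>(reinterpret_cast<uintptr_t>(pc) & 0xff);
    }

    static Entry* GetCacheEntry(Address pc) {
      uint32_t hash = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc));
      hash ^= hash >> 16;                     // Illustrative mixing only.
      uint32_t index = hash & (kCacheSize - 1);  // Mask works: size is 2^n.
      Entry* entry = &cache[index];
      if (entry->pc != pc) {
        // Write 'code' before 'pc': a profiling signal arriving between
        // the two stores sees a stale pc (a harmless miss) rather than a
        // fresh pc paired with the wrong code (a wrong hit).
        entry->code = SlowFindCodeForPc(pc);
        entry->pc = pc;
      }
      return entry;
    }

    int main() {
      int x = 0;
      Entry* e = GetCacheEntry(&x);
      return e == GetCacheEntry(&x) ? 0 : 1;  // Second lookup hits the cache.
    }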
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 102244c9ba..20111904f5 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -46,6 +46,32 @@ class Top;
class ThreadLocalTop;
+class PcToCodeCache : AllStatic {
+ public:
+ struct PcToCodeCacheEntry {
+ Address pc;
+ Code* code;
+ };
+
+ static PcToCodeCacheEntry* cache(int index) {
+ return &cache_[index];
+ }
+
+ static Code* GcSafeFindCodeForPc(Address pc);
+ static Code* GcSafeCastToCode(HeapObject* object, Address pc);
+
+ static void FlushPcToCodeCache() {
+ memset(&cache_[0], 0, sizeof(cache_));
+ }
+
+ static PcToCodeCacheEntry* GetCacheEntry(Address pc);
+
+ private:
+ static const int kPcToCodeCacheSize = 256;
+ static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+};
+
+
class StackHandler BASE_EMBEDDED {
public:
enum State {
@@ -64,7 +90,7 @@ class StackHandler BASE_EMBEDDED {
inline bool includes(Address address) const;
// Garbage collection support.
- inline void Iterate(ObjectVisitor* v) const;
+ inline void Iterate(ObjectVisitor* v, Code* holder) const;
// Conversion support.
static inline StackHandler* FromAddress(Address address);
@@ -74,16 +100,11 @@ class StackHandler BASE_EMBEDDED {
bool is_try_catch() { return state() == TRY_CATCH; }
bool is_try_finally() { return state() == TRY_FINALLY; }
- // Garbage collection support.
- void Cook(Code* code);
- void Uncook(Code* code);
-
private:
// Accessors.
inline State state() const;
- inline Address pc() const;
- inline void set_pc(Address value);
+ inline Address* pc_address() const;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
@@ -112,7 +133,13 @@ class StackFrame BASE_EMBEDDED {
// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
- enum Id { NO_ID = 0 };
+ // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that the enumeration
+ // type has the correct value range (see Issue 830 for more details).
+ enum Id {
+ ID_MIN_VALUE = kMinInt,
+ ID_MAX_VALUE = kMaxInt,
+ NO_ID = 0
+ };
// Copy constructor; it breaks the connection to host iterator.
StackFrame(const StackFrame& original) {
@@ -152,13 +179,20 @@ class StackFrame BASE_EMBEDDED {
virtual Type type() const = 0;
// Get the code associated with this frame.
- virtual Code* code() const = 0;
+ // This method could be called during marking phase of GC.
+ virtual Code* unchecked_code() const = 0;
- // Garbage collection support.
- static void CookFramesForThread(ThreadLocalTop* thread);
- static void UncookFramesForThread(ThreadLocalTop* thread);
+ // Get the code associated with this frame.
+ Code* code() const { return GetContainingCode(pc()); }
+
+ // Get the code object that contains the given pc.
+ Code* GetContainingCode(Address pc) const {
+ return PcToCodeCache::GetCacheEntry(pc)->code;
+ }
+
+ virtual void Iterate(ObjectVisitor* v) const = 0;
+ static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
- virtual void Iterate(ObjectVisitor* v) const { }
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
@@ -200,10 +234,6 @@ class StackFrame BASE_EMBEDDED {
// Get the type and the state of the calling frame.
virtual Type GetCallerState(State* state) const;
- // Cooking/uncooking support.
- void Cook();
- void Uncook();
-
friend class StackFrameIterator;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
@@ -218,7 +248,7 @@ class EntryFrame: public StackFrame {
public:
virtual Type type() const { return ENTRY; }
- virtual Code* code() const;
+ virtual Code* unchecked_code() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -249,7 +279,7 @@ class EntryConstructFrame: public EntryFrame {
public:
virtual Type type() const { return ENTRY_CONSTRUCT; }
- virtual Code* code() const;
+ virtual Code* unchecked_code() const;
static EntryConstructFrame* cast(StackFrame* frame) {
ASSERT(frame->is_entry_construct());
@@ -268,10 +298,9 @@ class EntryConstructFrame: public EntryFrame {
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
- enum Mode { MODE_NORMAL, MODE_DEBUG };
virtual Type type() const { return EXIT; }
- virtual Code* code() const;
+ virtual Code* unchecked_code() const;
Object*& code_slot() const;
@@ -397,7 +426,7 @@ class JavaScriptFrame: public StandardFrame {
int index) const;
// Determine the code for the frame.
- virtual Code* code() const;
+ virtual Code* unchecked_code() const;
static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
@@ -406,19 +435,11 @@ class JavaScriptFrame: public StandardFrame {
protected:
explicit JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator), disable_heap_access_(false) { }
+ : StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
- // When this mode is enabled it is not allowed to access heap objects.
- // This is a special mode used when gathering stack samples in profiler.
- // A shortcoming is that caller's SP value will be calculated incorrectly
- // (see GetCallerStackPointer implementation), but it is not used for stack
- // sampling.
- void DisableHeapAccess() { disable_heap_access_ = true; }
-
private:
- bool disable_heap_access_;
inline Object* function_slot_object() const;
friend class StackFrameIterator;
@@ -433,7 +454,7 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
virtual Type type() const { return ARGUMENTS_ADAPTOR; }
// Determine the code for the frame.
- virtual Code* code() const;
+ virtual Code* unchecked_code() const;
static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
ASSERT(frame->is_arguments_adaptor());
@@ -463,7 +484,7 @@ class InternalFrame: public StandardFrame {
virtual void Iterate(ObjectVisitor* v) const;
// Determine the code for the frame.
- virtual Code* code() const;
+ virtual Code* unchecked_code() const;
static InternalFrame* cast(StackFrame* frame) {
ASSERT(frame->is_internal());
@@ -625,6 +646,8 @@ class SafeStackFrameIterator BASE_EMBEDDED {
void Advance();
void Reset();
+ static bool is_active() { return active_count_ > 0; }
+
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
@@ -638,6 +661,19 @@ class SafeStackFrameIterator BASE_EMBEDDED {
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
+ // This is a nasty hack to make sure the active count is incremented
+ // before the constructor for the embedded iterator is invoked. This
+ // is needed because the constructor will start looking at frames
+ // right away and we need to make sure it doesn't start inspecting
+ // heap objects.
+ class ActiveCountMaintainer BASE_EMBEDDED {
+ public:
+ ActiveCountMaintainer() { active_count_++; }
+ ~ActiveCountMaintainer() { active_count_--; }
+ };
+
+ ActiveCountMaintainer maintainer_;
+ static int active_count_;
Address low_bound_;
Address high_bound_;
const bool is_valid_top_;
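
The ActiveCountMaintainer hack above works only because maintainer_ is declared (and, in the frames.cc hunk, explicitly initialized) before the members that inspect frames: C++ constructs members in declaration order. The pattern in miniature:

    #include <cassert>

    static int active_count = 0;
    static bool is_active() { return active_count > 0; }

    // RAII guard: construction raises the count, destruction lowers it.
    struct ActiveCountMaintainer {
      ActiveCountMaintainer() { ++active_count; }
      ~ActiveCountMaintainer() { --active_count; }
    };

    struct SafeIterator {
      // Declared, and therefore constructed, before any member that
      // inspects frames, so is_active() is already true while those
      // members initialize: the point of the "nasty hack" comment above.
      ActiveCountMaintainer maintainer_;
      bool saw_active_;
      SafeIterator() : maintainer_(), saw_active_(is_active()) {}
    };

    int main() {
      assert(!is_active());
      {
        SafeIterator it;
        assert(it.saw_active_);  // Count was raised before later members ran.
        assert(is_active());
      }
      assert(!is_active());      // Guard released on scope exit.
      return 0;
    }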
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index e97ed76072..5ffebfb53b 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -30,6 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "full-codegen.h"
+#include "macro-assembler.h"
#include "scopes.h"
#include "stub-cache.h"
#include "debug.h"
@@ -38,407 +39,6 @@
namespace v8 {
namespace internal {
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
- VisitDeclarations(scope->declarations());
- CHECK_BAILOUT;
-
- VisitStatements(fun->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDeclarations(
- ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); i++) {
- Visit(decls->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
- Property* prop = decl->proxy()->AsProperty();
- if (prop != NULL) {
- Visit(prop->obj());
- Visit(prop->key());
- }
-
- if (decl->fun() != NULL) {
- Visit(decl->fun());
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
- Visit(stmt->condition());
- CHECK_BAILOUT;
- Visit(stmt->then_statement());
- CHECK_BAILOUT;
- Visit(stmt->else_statement());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
- if (!FLAG_always_full_compiler) BAILOUT("ForStatement");
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- CHECK_BAILOUT;
- }
- if (stmt->cond() != NULL) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- }
- Visit(stmt->body());
- if (stmt->next() != NULL) {
- CHECK_BAILOUT;
- Visit(stmt->next());
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->catch_block());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->finally_block());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- BAILOUT("SharedFunctionInfoLiteral");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
- Visit(expr->condition());
- CHECK_BAILOUT;
- Visit(expr->then_expression());
- CHECK_BAILOUT;
- Visit(expr->else_expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-
- for (int i = 0, len = properties->length(); i < len; i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (property->IsCompileTimeValue()) continue;
- Visit(property->key());
- CHECK_BAILOUT;
- Visit(property->value());
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- if (subexpr->AsLiteral() != NULL) continue;
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- Visit(subexpr);
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- Visit(expr->key());
- CHECK_BAILOUT;
- Visit(expr->value());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
- Token::Value op = expr->op();
- if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
- // All other variables are supported.
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This is a throw reference error.
- BAILOUT("non-variable/non-property assignment");
- }
-
- Visit(expr->value());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
- Visit(expr->exception());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
- Visit(expr->obj());
- CHECK_BAILOUT;
- Visit(expr->key());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- // Check for supported calls
- if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("call to the identifier 'eval'");
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Calls to global variables are supported.
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- BAILOUT("call to a lookup slot");
- } else if (fun->AsProperty() != NULL) {
- Property* prop = fun->AsProperty();
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // Otherwise the call is supported if the function expression is.
- Visit(fun);
- }
- // Check all arguments to the call.
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
- Visit(expr->expression());
- CHECK_BAILOUT;
- ZoneList<Expression*>* args = expr->arguments();
- // Check all arguments to the call
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
- // Check for inline runtime call
- if (expr->name()->Get(0) == '_' &&
- CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("inlined runtime call");
- }
- // Check all arguments to the call. (Relies on TEMP meaning STACK.)
- for (int i = 0; i < expr->arguments()->length(); i++) {
- Visit(expr->arguments()->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::ADD:
- case Token::BIT_NOT:
- case Token::NOT:
- case Token::SUB:
- case Token::TYPEOF:
- case Token::VOID:
- Visit(expr->expression());
- break;
- case Token::DELETE:
- BAILOUT("UnaryOperation: DELETE");
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- Property* prop = expr->expression()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("CountOperation with lookup slot");
- }
- }
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This is a throw reference error.
- BAILOUT("CountOperation non-variable/non-property expression");
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
- // Supported.
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
void BreakableStatementChecker::Check(Statement* stmt) {
Visit(stmt);
}
@@ -616,6 +216,12 @@ void BreakableStatementChecker::VisitThrow(Throw* expr) {
}
+void BreakableStatementChecker::VisitIncrementOperation(
+ IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
void BreakableStatementChecker::VisitProperty(Property* expr) {
// Property load is breakable.
is_breakable_ = true;
@@ -654,6 +260,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
}
+void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
+ Visit(expr->expression());
+}
+
+
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
@@ -707,6 +318,46 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
}
+bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
+ // TODO(kasperl): Once the compare stub allows leaving out the
+ // inlined smi case, we should get rid of this check.
+ if (Token::IsCompareOp(op)) return true;
+ // TODO(kasperl): Once the unary bit not stub allows leaving out
+ // the inlined smi case, we should get rid of this check.
+ if (op == Token::BIT_NOT) return true;
+ // Inline smi case inside loops, but not division and modulo which
+ // are too complicated and take up too much space.
+ return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0);
+}
+
+
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ // In an effect context, the true and the false case branch to the
+ // same label.
+ *if_true = *if_false = *fall_through = materialize_true;
+ break;
+ case Expression::kValue:
+ *if_true = *fall_through = materialize_true;
+ *if_false = materialize_false;
+ break;
+ case Expression::kTest:
+ *if_true = true_label_;
+ *if_false = false_label_;
+ *fall_through = fall_through_;
+ break;
+ }
+}
+
+
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
@@ -851,79 +502,81 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) {
- EmitIsSpecObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) {
- EmitStringCharFromCode(expr->arguments());
- } else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) {
- EmitStringCharCodeAt(expr->arguments());
- } else if (strcmp("_StringCharAt", *name->ToCString()) == 0) {
- EmitStringCharAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else if (strcmp("_IsRegExpEquivalent", *name->ToCString()) == 0) {
- EmitIsRegExpEquivalent(expr->arguments());
- } else if (strcmp("_IsStringWrapperSafeForDefaultValueOf",
- *name->ToCString()) == 0) {
- EmitIsStringWrapperSafeForDefaultValueOf(expr->arguments());
- } else {
- UNREACHABLE();
+ SmartPointer<char> cstring = name->ToCString();
+
+#define CHECK_EMIT_INLINE_CALL(name, x, y) \
+ if (strcmp("_"#name, *cstring) == 0) { \
+ Emit##name(expr->arguments()); \
+ return; \
+ }
+ INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
+#undef CHECK_EMIT_INLINE_CALL
+ UNREACHABLE();
+}
+
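The CHECK_EMIT_INLINE_CALL macro above expands once per entry of INLINE_RUNTIME_FUNCTION_LIST, replacing the hand-maintained strcmp chain it deletes. A minimal, self-contained sketch of the same X-macro dispatch pattern (the list and emitters here are illustrative, not the real V8 ones):

    #include <cstdio>
    #include <cstring>

    // Stand-in for INLINE_RUNTIME_FUNCTION_LIST; real entries take more args.
    #define DEMO_FUNCTION_LIST(F) \
      F(IsSmi)                    \
      F(StringAdd)                \
      F(MathPow)

    // Declare one emitter per list entry.
    #define DECLARE_EMITTER(name) \
      static void Emit##name() { std::printf("Emit%s\n", #name); }
    DEMO_FUNCTION_LIST(DECLARE_EMITTER)
    #undef DECLARE_EMITTER

    // Dispatch on the "_Name" convention, exactly as EmitInlineRuntimeCall does.
    static void Dispatch(const char* name) {
    #define CHECK_EMIT(fn)                   \
      if (std::strcmp("_" #fn, name) == 0) { \
        Emit##fn();                          \
        return;                              \
      }
      DEMO_FUNCTION_LIST(CHECK_EMIT)
    #undef CHECK_EMIT
    }

Adding a runtime function to the list now updates both the declarations and the dispatcher in one place, which is the point of the rewrite.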
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left->ResultOverwriteAllowed()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right->ResultOverwriteAllowed()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ switch (op) {
+ case Token::COMMA:
+ VisitForEffect(left);
+ Visit(right);
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ // Figure out if either of the operands is a constant.
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, left, right)
+ : kNoConstants;
+
+ // Load only the operands that we need to materialize.
+ if (constant == kNoConstants) {
+ VisitForValue(left, kStack);
+ VisitForValue(right, kAccumulator);
+ } else if (constant == kRightConstant) {
+ VisitForValue(left, kAccumulator);
+ } else {
+ ASSERT(constant == kLeftConstant);
+ VisitForValue(right, kAccumulator);
+ }
+
+ SetSourcePosition(expr->position());
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant);
+ } else {
+ EmitBinaryOp(op, context_, mode);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
}
}
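VisitBinaryOperation above loads only the operands it must materialize, based on GetConstantOperand (declared in the header further down). The platform implementation is not part of this hunk; a plausible shape, assuming only smi literals count as constants, would be:

    FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
        Token::Value op, Expression* left, Expression* right) {
      // Hypothetical sketch: prefer a constant on the right, matching the
      // kStack/kAccumulator load order in VisitBinaryOperation. The real
      // code presumably also restricts by op; that is omitted here.
      Literal* right_lit = right->AsLiteral();
      if (right_lit != NULL && right_lit->handle()->IsSmi()) {
        return kRightConstant;
      }
      Literal* left_lit = left->AsLiteral();
      if (left_lit != NULL && left_lit->handle()->IsSmi()) {
        return kLeftConstant;
      }
      return kNoConstants;
    }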
@@ -939,25 +592,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- VisitForControl(expr->left(), &done, &eval_right);
+ VisitForControl(expr->left(), &done, &eval_right, &eval_right);
break;
case Expression::kValue:
- VisitForValueControl(expr->left(),
- location_,
- &done,
- &eval_right);
+ VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
break;
case Expression::kTest:
- VisitForControl(expr->left(), true_label_, &eval_right);
- break;
- case Expression::kValueTest:
- VisitForValueControl(expr->left(),
- location_,
- true_label_,
- &eval_right);
- break;
- case Expression::kTestValue:
- VisitForControl(expr->left(), true_label_, &eval_right);
+ VisitForControl(expr->left(), true_label_, &eval_right, &eval_right);
break;
}
} else {
@@ -966,25 +607,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- VisitForControl(expr->left(), &eval_right, &done);
+ VisitForControl(expr->left(), &eval_right, &done, &eval_right);
break;
case Expression::kValue:
- VisitForControlValue(expr->left(),
- location_,
- &eval_right,
- &done);
+ VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
break;
case Expression::kTest:
- VisitForControl(expr->left(), &eval_right, false_label_);
- break;
- case Expression::kValueTest:
- VisitForControl(expr->left(), &eval_right, false_label_);
- break;
- case Expression::kTestValue:
- VisitForControlValue(expr->left(),
- location_,
- &eval_right,
- false_label_);
+ VisitForControl(expr->left(), &eval_right, false_label_, &eval_right);
break;
}
}
@@ -996,6 +625,43 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
}
+void FullCodeGenerator::VisitLogicalForValue(Expression* expr,
+ Token::Value op,
+ Location where,
+ Label* done) {
+ ASSERT(op == Token::AND || op == Token::OR);
+ VisitForValue(expr, kAccumulator);
+ __ push(result_register());
+
+ Label discard;
+ switch (where) {
+ case kAccumulator: {
+ Label restore;
+ if (op == Token::OR) {
+ DoTest(&restore, &discard, &restore);
+ } else {
+ DoTest(&discard, &restore, &restore);
+ }
+ __ bind(&restore);
+ __ pop(result_register());
+ __ jmp(done);
+ break;
+ }
+ case kStack: {
+ if (op == Token::OR) {
+ DoTest(done, &discard, &discard);
+ } else {
+ DoTest(&discard, done, &discard);
+ }
+ break;
+ }
+ }
+
+ __ bind(&discard);
+ __ Drop(1);
+}
+
+
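DoTest funnels into the new Split helper (declared in the header below), whose job is to avoid emitting a branch to whichever label is bound immediately after the test. On ia32 the helper reads roughly like this; a sketch, not verbatim platform code:

    void FullCodeGenerator::Split(Condition cc,
                                  Label* if_true,
                                  Label* if_false,
                                  Label* fall_through) {
      if (if_false == fall_through) {
        __ j(cc, if_true);                    // false case falls through
      } else if (if_true == fall_through) {
        __ j(NegateCondition(cc), if_false);  // true case falls through
      } else {
        __ j(cc, if_true);                    // neither is adjacent:
        __ jmp(if_false);                     // both jumps are needed
      }
    }

This is why every VisitForControl call in this patch gains a third argument naming the label that follows the test.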
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
Breakable nested_statement(this, stmt);
@@ -1023,16 +689,19 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
SetStatementPosition(stmt);
Label then_part, else_part, done;
- // Do not worry about optimizing for empty then or else bodies.
- VisitForControl(stmt->condition(), &then_part, &else_part);
-
- __ bind(&then_part);
- Visit(stmt->then_statement());
- __ jmp(&done);
-
- __ bind(&else_part);
- Visit(stmt->else_statement());
+ if (stmt->HasElseStatement()) {
+ VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ __ jmp(&done);
+ __ bind(&else_part);
+ Visit(stmt->else_statement());
+ } else {
+ VisitForControl(stmt->condition(), &then_part, &done, &then_part);
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ }
__ bind(&done);
}
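With fall_through set to &then_part in the else-less case, the condition's false edge goes straight to done and the true edge simply falls through, so the emitted layout is roughly:

    // if (c) S;   -- no else statement
    //   <test c>       ; branches to done when false
    // then_part:       ; reached by fall-through when true
    //   <code for S>
    // done:

One unconditional jump disappears compared to the old two-label scheme.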
@@ -1120,7 +789,7 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
- Label body, stack_limit_hit, stack_check_success;
+ Label body, stack_limit_hit, stack_check_success, done;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@@ -1132,28 +801,31 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
+ // Record the position of the do while condition and make sure it is
+ // possible to break on the condition.
__ bind(loop_statement.continue_target());
-
- // Record the position of the do while condition and make sure it is possible
- // to break on the condition.
SetExpressionPosition(stmt->cond(), stmt->condition_position());
+ VisitForControl(stmt->cond(),
+ &body,
+ loop_statement.break_target(),
+ loop_statement.break_target());
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ __ bind(loop_statement.break_target());
+ __ jmp(&done);
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
-
+ __ bind(&done);
decrement_loop_depth();
}
void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
- Label body, stack_limit_hit, stack_check_success;
+ Label body, stack_limit_hit, stack_check_success, done;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@@ -1163,24 +835,30 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
__ bind(&body);
Visit(stmt->body());
-
__ bind(loop_statement.continue_target());
- // Emit the statement position here as this is where the while statement code
- // starts.
+
+ // Emit the statement position here as this is where the while
+ // statement code starts.
SetStatementPosition(stmt);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ VisitForControl(stmt->cond(),
+ &body,
+ loop_statement.break_target(),
+ loop_statement.break_target());
+
+ __ bind(loop_statement.break_target());
+ __ jmp(&done);
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
+ __ bind(&done);
decrement_loop_depth();
}
@@ -1198,6 +876,11 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
__ bind(&body);
Visit(stmt->body());
@@ -1209,8 +892,8 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
}
__ bind(&test);
- // Emit the statement position here as this is where the for statement code
- // starts.
+ // Emit the statement position here as this is where the for
+ // statement code starts.
SetStatementPosition(stmt);
// Check stack before looping.
@@ -1218,16 +901,14 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
__ bind(&stack_check_success);
if (stmt->cond() != NULL) {
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ VisitForControl(stmt->cond(),
+ &body,
+ loop_statement.break_target(),
+ loop_statement.break_target());
} else {
__ jmp(&body);
}
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
-
__ bind(loop_statement.break_target());
decrement_loop_depth();
}
@@ -1354,7 +1035,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
- VisitForControl(expr->condition(), &true_case, &false_case);
+ VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
@@ -1426,6 +1107,11 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
}
+void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
@@ -1442,6 +1128,14 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
return 0;
}
+
+void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ Apply(context_, result_register());
+}
+
#undef __
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 00f4c06e29..840c825014 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -36,29 +36,6 @@
namespace v8 {
namespace internal {
-class FullCodeGenSyntaxChecker: public AstVisitor {
- public:
- FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
-
- void Check(FunctionLiteral* fun);
-
- bool has_supported_syntax() { return has_supported_syntax_; }
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool has_supported_syntax_;
-
- DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
-};
-
-
// AST node visitor which can tell whether a given statement will be breakable
// when the code is compiled by the full compiler in the debugger. This means
// that there will be an IC (load/store/call) in the code generated for the
@@ -96,7 +73,8 @@ class FullCodeGenerator: public AstVisitor {
loop_depth_(0),
location_(kStack),
true_label_(NULL),
- false_label_(NULL) {
+ false_label_(NULL),
+ fall_through_(NULL) {
}
static Handle<Code> MakeCode(CompilationInfo* info);
@@ -259,8 +237,25 @@ class FullCodeGenerator: public AstVisitor {
kStack
};
+ enum ConstantOperand {
+ kNoConstants,
+ kLeftConstant,
+ kRightConstant
+ };
+
+ // Compute the frame pointer relative offset for a given local or
+ // parameter slot.
int SlotOffset(Slot* slot);
+ // Determine whether or not to inline the smi case for the given
+ // operation.
+ bool ShouldInlineSmiCase(Token::Value op);
+
+ // Compute which (if any) of the operands is a compile-time constant.
+ ConstantOperand GetConstantOperand(Token::Value op,
+ Expression* left,
+ Expression* right);
+
// Emit code to convert a pure value (in a register, slot, as a literal,
// or on top of the stack) into the result expected according to an
// expression context.
@@ -281,7 +276,8 @@ class FullCodeGenerator: public AstVisitor {
void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
- Label** if_false);
+ Label** if_false,
+ Label** fall_through);
// Emit code to convert pure control flow to a pair of labels into the
// result expected according to an expression context.
@@ -296,7 +292,14 @@ class FullCodeGenerator: public AstVisitor {
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
- void DoTest(Expression::Context context);
+ void DoTest(Label* if_true, Label* if_false, Label* fall_through);
+
+ // Helper function to split control flow and avoid a branch to the
+ // fall-through label if it is set up.
+ void Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
void Move(Register dst, Slot* source);
@@ -323,60 +326,38 @@ class FullCodeGenerator: public AstVisitor {
location_ = saved_location;
}
- void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
+ void VisitForControl(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
Expression::Context saved_context = context_;
Label* saved_true = true_label_;
Label* saved_false = false_label_;
+ Label* saved_fall_through = fall_through_;
context_ = Expression::kTest;
true_label_ = if_true;
false_label_ = if_false;
+ fall_through_ = fall_through;
Visit(expr);
context_ = saved_context;
true_label_ = saved_true;
false_label_ = saved_false;
- }
-
- void VisitForValueControl(Expression* expr,
- Location where,
- Label* if_true,
- Label* if_false) {
- Expression::Context saved_context = context_;
- Location saved_location = location_;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- context_ = Expression::kValueTest;
- location_ = where;
- true_label_ = if_true;
- false_label_ = if_false;
- Visit(expr);
- context_ = saved_context;
- location_ = saved_location;
- true_label_ = saved_true;
- false_label_ = saved_false;
- }
-
- void VisitForControlValue(Expression* expr,
- Location where,
- Label* if_true,
- Label* if_false) {
- Expression::Context saved_context = context_;
- Location saved_location = location_;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- context_ = Expression::kTestValue;
- location_ = where;
- true_label_ = if_true;
- false_label_ = if_false;
- Visit(expr);
- context_ = saved_context;
- location_ = saved_location;
- true_label_ = saved_true;
- false_label_ = saved_false;
+ fall_through_ = saved_fall_through;
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
+ // Try to perform a comparison as a fast inlined literal compare if
+  // the operands allow it. Returns true if the compare operation
+  // has been matched and all code generated; false otherwise.
+ bool TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(Variable* variable,
@@ -391,45 +372,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
-
// Platform-specific code for inline runtime calls.
void EmitInlineRuntimeCall(CallRuntime* expr);
- void EmitIsSmi(ZoneList<Expression*>* arguments);
- void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
- void EmitIsObject(ZoneList<Expression*>* arguments);
- void EmitIsSpecObject(ZoneList<Expression*>* arguments);
- void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
- void EmitIsFunction(ZoneList<Expression*>* arguments);
- void EmitIsArray(ZoneList<Expression*>* arguments);
- void EmitIsRegExp(ZoneList<Expression*>* arguments);
- void EmitIsConstructCall(ZoneList<Expression*>* arguments);
- void EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* arguments);
- void EmitObjectEquals(ZoneList<Expression*>* arguments);
- void EmitArguments(ZoneList<Expression*>* arguments);
- void EmitArgumentsLength(ZoneList<Expression*>* arguments);
- void EmitClassOf(ZoneList<Expression*>* arguments);
- void EmitValueOf(ZoneList<Expression*>* arguments);
- void EmitSetValueOf(ZoneList<Expression*>* arguments);
- void EmitNumberToString(ZoneList<Expression*>* arguments);
- void EmitStringCharFromCode(ZoneList<Expression*>* arguments);
- void EmitStringCharCodeAt(ZoneList<Expression*>* arguments);
- void EmitStringCharAt(ZoneList<Expression*>* arguments);
- void EmitStringCompare(ZoneList<Expression*>* arguments);
- void EmitStringAdd(ZoneList<Expression*>* arguments);
- void EmitLog(ZoneList<Expression*>* arguments);
- void EmitRandomHeapNumber(ZoneList<Expression*>* arguments);
- void EmitSubString(ZoneList<Expression*>* arguments);
- void EmitRegExpExec(ZoneList<Expression*>* arguments);
- void EmitMathPow(ZoneList<Expression*>* arguments);
- void EmitMathSin(ZoneList<Expression*>* arguments);
- void EmitMathCos(ZoneList<Expression*>* arguments);
- void EmitMathSqrt(ZoneList<Expression*>* arguments);
- void EmitCallFunction(ZoneList<Expression*>* arguments);
- void EmitRegExpConstructResult(ZoneList<Expression*>* arguments);
- void EmitSwapElements(ZoneList<Expression*>* arguments);
- void EmitGetFromCache(ZoneList<Expression*>* arguments);
- void EmitIsRegExpEquivalent(ZoneList<Expression*>* arguments);
+
+#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
+ void Emit##name(ZoneList<Expression*>* arguments);
+ INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
+#undef EMIT_INLINE_RUNTIME_CALL
// Platform-specific code for loading variables.
void EmitVariableLoad(Variable* expr, Expression::Context context);
@@ -450,7 +399,50 @@ class FullCodeGenerator: public AstVisitor {
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
- void EmitBinaryOp(Token::Value op, Expression::Context context);
+ void EmitBinaryOp(Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode);
+
+ // Helper functions for generating inlined smi code for certain
+ // binary operations.
+ void EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant);
+
+ void EmitConstantSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value);
+
+ void EmitConstantSmiBitOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value);
+
+ void EmitConstantSmiShiftOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value);
+
+ void EmitConstantSmiAdd(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value);
+
+ void EmitConstantSmiSub(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value);
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator.
@@ -471,14 +463,6 @@ class FullCodeGenerator: public AstVisitor {
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
- // Helper for compare operations. Expects the null-value in a register.
- void EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch);
-
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
@@ -523,6 +507,14 @@ class FullCodeGenerator: public AstVisitor {
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
+ void VisitForTypeofValue(Expression* expr, Location where);
+
+ void VisitLogicalForValue(Expression* expr,
+ Token::Value op,
+ Location where,
+ Label* done);
+
+
MacroAssembler* masm_;
CompilationInfo* info_;
@@ -534,6 +526,7 @@ class FullCodeGenerator: public AstVisitor {
Location location_;
Label* true_label_;
Label* false_label_;
+ Label* fall_through_;
friend class NestedStatement;
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 2d6a86a6f7..f12d026bdb 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -44,6 +44,20 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
}
+void FuncNameInferrer::PushLiteralName(Handle<String> name) {
+ if (IsOpen() && !Heap::prototype_symbol()->Equals(*name)) {
+ names_stack_.Add(name);
+ }
+}
+
+
+void FuncNameInferrer::PushVariableName(Handle<String> name) {
+ if (IsOpen() && !Heap::result_symbol()->Equals(*name)) {
+ names_stack_.Add(name);
+ }
+}
+
+
Handle<String> FuncNameInferrer::MakeNameFromStack() {
if (names_stack_.is_empty()) {
return Factory::empty_string();
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index e88586a445..a35034ecb5 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -36,11 +36,12 @@ namespace internal {
// Inference is performed in cases when an anonymous function is assigned
// to a variable or a property (see test-func-name-inference.cc for examples.)
//
-// The basic idea is that during AST traversal LHSs of expressions are
-// always visited before RHSs. Thus, during visiting the LHS, a name can be
-// collected, and during visiting the RHS, a function literal can be collected.
-// Inference is performed while leaving the assignment node.
-class FuncNameInferrer BASE_EMBEDDED {
+// The basic idea is that during parsing of LHSs of certain expressions
+// (assignments, declarations, object literals) we collect name strings,
+// and during parsing of the RHS, a function literal can be collected. After
+// parsing the RHS we can infer a name for function literals that do not have
+// a name.
+class FuncNameInferrer : public ZoneObject {
public:
FuncNameInferrer()
: entries_stack_(10),
@@ -61,11 +62,9 @@ class FuncNameInferrer BASE_EMBEDDED {
}
// Pushes an encountered name onto names stack when in collection state.
- void PushName(Handle<String> name) {
- if (IsOpen()) {
- names_stack_.Add(name);
- }
- }
+ void PushLiteralName(Handle<String> name);
+
+ void PushVariableName(Handle<String> name);
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
@@ -75,11 +74,16 @@ class FuncNameInferrer BASE_EMBEDDED {
}
// Infers a function name and leaves names collection state.
- void InferAndLeave() {
+ void Infer() {
ASSERT(IsOpen());
if (!funcs_to_infer_.is_empty()) {
InferFunctionsNames();
}
+ }
+
+  // Leaves names collection state.
+ void Leave() {
+ ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
}
@@ -102,34 +106,6 @@ class FuncNameInferrer BASE_EMBEDDED {
};
-// A wrapper class that automatically calls InferAndLeave when
-// leaving scope.
-class ScopedFuncNameInferrer BASE_EMBEDDED {
- public:
- explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer)
- : inferrer_(inferrer),
- is_entered_(false) {}
-
- ~ScopedFuncNameInferrer() {
- if (is_entered_) {
- inferrer_->InferAndLeave();
- }
- }
-
- // Triggers the wrapped inferrer into name collection state.
- void Enter() {
- inferrer_->Enter();
- is_entered_ = true;
- }
-
- private:
- FuncNameInferrer* inferrer_;
- bool is_entered_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedFuncNameInferrer);
-};
-
-
} } // namespace v8::internal
#endif // V8_FUNC_NAME_INFERRER_H_
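Splitting InferAndLeave into Infer and Leave lets the parser trigger inference without popping the collection scope (or vice versa), and the new Push*Name variants filter out "prototype" and "result", which would otherwise produce noise names. A hypothetical parser-side sequence for `var foo = function () {}` (driver code assumed, not part of this patch):

    FuncNameInferrer* inferrer = new FuncNameInferrer();  // a ZoneObject now
    inferrer->Enter();                     // open collection at the '='
    inferrer->PushVariableName(var_name);  // LHS: "foo" (skipped if "result")
    // ... parse the RHS, producing an anonymous FunctionLiteral ...
    inferrer->AddFunction(function_literal);
    inferrer->Infer();                     // the literal is named "foo"
    inferrer->Leave();                     // pop the collection scope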
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 3fe9e240b5..f168d6eb14 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -244,10 +244,12 @@ const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
+const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+const uint32_t kDebugZapValue = 0xbadbaddb;
#endif
@@ -662,7 +664,7 @@ F FUNCTION_CAST(Address addr) {
#define TRACK_MEMORY(name)
#endif
-// define used for helping GCC to make better inlining. Don't bother for debug
+// Define used for helping GCC to make better inlining. Don't bother for debug
// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
// errors in debug build.
#if defined(__GNUC__) && !defined(DEBUG)
@@ -678,6 +680,14 @@ F FUNCTION_CAST(Address addr) {
#define NO_INLINE(header) header
#endif
+
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
+#else
+#define MUST_USE_RESULT
+#endif
+
+
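MUST_USE_RESULT wraps GCC 4's warn_unused_result attribute so failure-returning functions can force callers to inspect the result. An illustrative declaration (the function shown is hypothetical):

    // With GCC >= 4, discarding the return value below draws a warning.
    MUST_USE_RESULT Object* AllocateRawDemo(int size_in_bytes);

    void Caller() {
      AllocateRawDemo(16);                   // warning: result unused
      Object* result = AllocateRawDemo(16);  // OK: result is consumed
      USE(result);
    }

On other compilers the macro expands to nothing, so annotated code stays portable.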
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7b76e923fa..01464016be 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -773,6 +773,7 @@ bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
+ function->shared()->set_code_age(0);
return true;
} else {
CompilationInfo info(function, 0, receiver);
@@ -788,6 +789,7 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
+ function->shared()->set_code_age(0);
return true;
} else {
CompilationInfo info(function, 1, receiver);
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 656c5546b2..0d1ad5ada9 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -28,7 +28,8 @@
#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_
-#include "log.h"
+#include "heap.h"
+#include "objects.h"
#include "v8-counters.h"
namespace v8 {
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 7668bbc150..e47d66f984 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -280,10 +280,12 @@ void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
printer_->PrintRetainers(cluster, stream);
}
+} // namespace
+
// A helper class for building a retainers tree, that aggregates
// all equivalent clusters.
-class RetainerTreeAggregator BASE_EMBEDDED {
+class RetainerTreeAggregator {
public:
explicit RetainerTreeAggregator(ClustersCoarser* coarser)
: coarser_(coarser) {}
@@ -311,8 +313,6 @@ void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
tree->ForEach(&retainers_aggregator);
}
-} // namespace
-
HeapProfiler* HeapProfiler::singleton_ = NULL;
@@ -347,30 +347,46 @@ void HeapProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) {
ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name);
+ return singleton_->TakeSnapshotImpl(name, type);
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) {
ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name);
+ return singleton_->TakeSnapshotImpl(name, type);
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
Heap::CollectAllGarbage(true);
- HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
- HeapSnapshotGenerator generator(result);
- generator.GenerateSnapshot();
+ HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
+ HeapSnapshot* result =
+ snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
+ switch (s_type) {
+ case HeapSnapshot::kFull: {
+ HeapSnapshotGenerator generator(result);
+ generator.GenerateSnapshot();
+ break;
+ }
+ case HeapSnapshot::kAggregated: {
+ AggregatedHeapSnapshot agg_snapshot;
+ AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
+ generator.GenerateSnapshot();
+ generator.FillHeapSnapshot(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
snapshots_->SnapshotGenerationFinished();
return result;
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name) {
- return TakeSnapshotImpl(snapshots_->GetName(name));
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) {
+ return TakeSnapshotImpl(snapshots_->GetName(name), type);
}
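TakeSnapshotImpl now dispatches on the snapshot type: kFull walks the live object graph as before, while kAggregated builds the histogram/constructor/retainer statistics and converts them into snapshot entries via FillHeapSnapshot. Assuming callers pass the enum through the int parameter unchanged, usage looks like:

    // Hypothetical call sites; the int parameter carries a HeapSnapshot::Type.
    HeapSnapshot* full =
        HeapProfiler::TakeSnapshot("full-dump", HeapSnapshot::kFull);
    HeapSnapshot* aggregated =
        HeapProfiler::TakeSnapshot("histogram", HeapSnapshot::kAggregated);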
@@ -433,16 +449,25 @@ static const char* GetConstructorName(const char* name) {
}
-void JSObjectsCluster::Print(StringStream* accumulator) const {
- ASSERT(!is_null());
+const char* JSObjectsCluster::GetSpecialCaseName() const {
if (constructor_ == FromSpecialCase(ROOTS)) {
- accumulator->Add("(roots)");
+ return "(roots)";
} else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
- accumulator->Add("(global property)");
+ return "(global property)";
} else if (constructor_ == FromSpecialCase(CODE)) {
- accumulator->Add("(code)");
+ return "(code)";
} else if (constructor_ == FromSpecialCase(SELF)) {
- accumulator->Add("(self)");
+ return "(self)";
+ }
+ return NULL;
+}
+
+
+void JSObjectsCluster::Print(StringStream* accumulator) const {
+ ASSERT(!is_null());
+ const char* special_case_name = GetSpecialCaseName();
+ if (special_case_name != NULL) {
+ accumulator->Add(special_case_name);
} else {
SmartPointer<char> s_name(
constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
@@ -618,13 +643,19 @@ const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(DELETE_ON_EXIT) {
+ : zscope_(DELETE_ON_EXIT),
+ aggregator_(NULL) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
}
+RetainerHeapProfile::~RetainerHeapProfile() {
+ delete aggregator_;
+}
+
+
void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
HeapObject* ref) {
JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
@@ -646,18 +677,22 @@ void RetainerHeapProfile::CollectStats(HeapObject* obj) {
}
+void RetainerHeapProfile::CoarseAndAggregate() {
+ coarser_.Process(&retainers_tree_);
+ ASSERT(aggregator_ == NULL);
+ aggregator_ = new RetainerTreeAggregator(&coarser_);
+ aggregator_->Process(&retainers_tree_);
+}
+
+
void RetainerHeapProfile::DebugPrintStats(
RetainerHeapProfile::Printer* printer) {
- coarser_.Process(&retainers_tree_);
// Print clusters that have no equivalents, aggregating their retainers.
AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
retainers_tree_.ForEach(&agg_printer);
- // Now aggregate clusters that have equivalents...
- RetainerTreeAggregator aggregator(&coarser_);
- aggregator.Process(&retainers_tree_);
- // ...and print them.
+ // Print clusters that have equivalents.
SimpleRetainerTreePrinter s_printer(printer);
- aggregator.output_tree().ForEach(&s_printer);
+ aggregator_->output_tree().ForEach(&s_printer);
}
@@ -670,16 +705,6 @@ void RetainerHeapProfile::PrintStats() {
//
// HeapProfiler class implementation.
//
-void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- if (!FreeListNode::IsFreeListNode(obj)) {
- info[type].increment_number(1);
- info[type].increment_bytes(obj->Size());
- }
-}
-
-
static void StackWeakReferenceCallback(Persistent<Value> object,
void* trace) {
DeleteArray(static_cast<Address*>(trace));
@@ -702,46 +727,339 @@ void HeapProfiler::WriteSample() {
LOG(HeapSampleStats(
"Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
- HistogramInfo info[LAST_TYPE+1];
-#define DEF_TYPE_NAME(name) info[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
+ AggregatedHeapSnapshot snapshot;
+ AggregatedHeapSnapshotGenerator generator(&snapshot);
+ generator.GenerateSnapshot();
- ConstructorHeapProfile js_cons_profile;
- RetainerHeapProfile js_retainer_profile;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- CollectStats(obj, info);
- js_cons_profile.CollectStats(obj);
- js_retainer_profile.CollectStats(obj);
+ HistogramInfo* info = snapshot.info();
+ for (int i = FIRST_NONSTRING_TYPE;
+ i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
+ ++i) {
+ if (info[i].bytes() > 0) {
+ LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ info[i].bytes()));
+ }
}
+ snapshot.js_cons_profile()->PrintStats();
+ snapshot.js_retainer_profile()->PrintStats();
+
+ GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
+ StackWeakReferenceCallback);
+
+ LOG(HeapSampleEndEvent("Heap", "allocated"));
+}
+
+
+AggregatedHeapSnapshot::AggregatedHeapSnapshot()
+ : info_(NewArray<HistogramInfo>(
+ AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
+#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
+ INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
+#undef DEF_TYPE_NAME
+ info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
+ "STRING_TYPE");
+}
+
+
+AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
+ DeleteArray(info_);
+}
+
+
+AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
+ AggregatedHeapSnapshot* agg_snapshot)
+ : agg_snapshot_(agg_snapshot) {
+}
+
+
+void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
+ HistogramInfo* info = agg_snapshot_->info();
+ HistogramInfo& strings = info[kAllStringsType];
// Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
#define INCREMENT_SIZE(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT_SIZE)
+ strings.increment_number(info[type].number()); \
+ strings.increment_bytes(info[type].bytes());
+ STRING_TYPE_LIST(INCREMENT_SIZE);
#undef INCREMENT_SIZE
- if (string_bytes > 0) {
- LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+}
+
+
+void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ if (!FreeListNode::IsFreeListNode(obj)) {
+ agg_snapshot_->info()[type].increment_number(1);
+ agg_snapshot_->info()[type].increment_bytes(obj->Size());
}
+}
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].bytes() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
+
+void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ CollectStats(obj);
+ agg_snapshot_->js_cons_profile()->CollectStats(obj);
+ agg_snapshot_->js_retainer_profile()->CollectStats(obj);
+ }
+ CalculateStringsStats();
+ agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
+}
+
+
+class CountingConstructorHeapProfileIterator {
+ public:
+ CountingConstructorHeapProfileIterator()
+ : entities_count_(0), children_count_(0) {
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ ++entities_count_;
+ children_count_ += number_and_size.number();
+ }
+
+ int entities_count() { return entities_count_; }
+ int children_count() { return children_count_; }
+
+ private:
+ int entities_count_;
+ int children_count_;
+};
+
+
+static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
+ int* root_child_index,
+ HeapEntry::Type type,
+ const char* name,
+ int count,
+ int size,
+ int children_count,
+ int retainers_count) {
+ HeapEntry* entry = snapshot->AddEntry(
+ type, name, count, size, children_count, retainers_count);
+ ASSERT(entry != NULL);
+ snapshot->root()->SetUnidirElementReference(*root_child_index,
+ *root_child_index + 1,
+ entry);
+ *root_child_index = *root_child_index + 1;
+ return entry;
+}
+
+
+class AllocatingConstructorHeapProfileIterator {
+ public:
+ AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
+ int* root_child_index)
+ : snapshot_(snapshot),
+ root_child_index_(root_child_index) {
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ const char* name = cluster.GetSpecialCaseName();
+ if (name == NULL) {
+ name = snapshot_->collection()->GetFunctionName(cluster.constructor());
}
+ AddEntryFromAggregatedSnapshot(snapshot_,
+ root_child_index_,
+ HeapEntry::kObject,
+ name,
+ number_and_size.number(),
+ number_and_size.bytes(),
+ 0,
+ 0);
}
- js_cons_profile.PrintStats();
- js_retainer_profile.PrintStats();
+ private:
+ HeapSnapshot* snapshot_;
+ int* root_child_index_;
+};
- GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
- StackWeakReferenceCallback);
- LOG(HeapSampleEndEvent("Heap", "allocated"));
+static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
+ return cluster.can_be_coarsed() ?
+ reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
+}
+
+
+static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
+ if (object->IsString()) {
+ return JSObjectsCluster(String::cast(object));
+ } else {
+ JSObject* js_obj = JSObject::cast(object);
+ String* constructor = JSObject::cast(js_obj)->constructor_name();
+ return JSObjectsCluster(constructor, object);
+ }
+}
+
+
+class CountingRetainersIterator {
+ public:
+ CountingRetainersIterator(const JSObjectsCluster& child_cluster,
+ HeapEntriesMap* map)
+ : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
+ if (map_->Map(child_) == NULL)
+ map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder);
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
+ map_->Pair(ClusterAsHeapObject(cluster),
+ HeapEntriesMap::kHeapEntryPlaceholder);
+ map_->CountReference(ClusterAsHeapObject(cluster), child_);
+ }
+
+ private:
+ HeapObject* child_;
+ HeapEntriesMap* map_;
+};
+
+
+class AllocatingRetainersIterator {
+ public:
+ AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
+ HeapEntriesMap* map)
+ : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
+ child_entry_ = map_->Map(child_);
+ ASSERT(child_entry_ != NULL);
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ int child_index, retainer_index;
+ map_->CountReference(ClusterAsHeapObject(cluster), child_,
+ &child_index, &retainer_index);
+ map_->Map(ClusterAsHeapObject(cluster))->SetElementReference(
+ child_index, number_and_size.number(), child_entry_, retainer_index);
+ }
+
+ private:
+ HeapObject* child_;
+ HeapEntriesMap* map_;
+ HeapEntry* child_entry_;
+};
+
+
+template<class RetainersIterator>
+class AggregatingRetainerTreeIterator {
+ public:
+ explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
+ HeapEntriesMap* map)
+ : coarser_(coarser), map_(map) {
+ }
+
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
+ if (coarser_ != NULL &&
+ !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
+ JSObjectsClusterTree* tree_to_iterate = tree;
+ ZoneScope zs(DELETE_ON_EXIT);
+ JSObjectsClusterTree dest_tree_;
+ if (coarser_ != NULL) {
+ RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
+ tree->ForEach(&retainers_aggregator);
+ tree_to_iterate = &dest_tree_;
+ }
+ RetainersIterator iterator(cluster, map_);
+ tree_to_iterate->ForEach(&iterator);
+ }
+
+ private:
+ ClustersCoarser* coarser_;
+ HeapEntriesMap* map_;
+};
+
+
+class AggregatedRetainerTreeAllocator {
+ public:
+ AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
+ int* root_child_index)
+ : snapshot_(snapshot), root_child_index_(root_child_index) {
+ }
+
+ HeapEntry* GetEntry(
+ HeapObject* obj, int children_count, int retainers_count) {
+ JSObjectsCluster cluster = HeapObjectAsCluster(obj);
+ const char* name = cluster.GetSpecialCaseName();
+ if (name == NULL) {
+ name = snapshot_->collection()->GetFunctionName(cluster.constructor());
+ }
+ return AddEntryFromAggregatedSnapshot(
+ snapshot_, root_child_index_, HeapEntry::kObject, name,
+ 0, 0, children_count, retainers_count);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ int* root_child_index_;
+};
+
+
+template<class Iterator>
+void AggregatedHeapSnapshotGenerator::IterateRetainers(
+ HeapEntriesMap* entries_map) {
+ RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
+ AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
+ p->coarser(), entries_map);
+ p->retainers_tree()->ForEach(&agg_ret_iter_1);
+ AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(NULL, entries_map);
+ p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
+}
+
+
+void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
+ // Count the number of entities.
+ int histogram_entities_count = 0;
+ int histogram_children_count = 0;
+ int histogram_retainers_count = 0;
+ for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
+ if (agg_snapshot_->info()[i].bytes() > 0) {
+ ++histogram_entities_count;
+ }
+ }
+ CountingConstructorHeapProfileIterator counting_cons_iter;
+ agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
+ histogram_entities_count += counting_cons_iter.entities_count();
+ HeapEntriesMap entries_map;
+ IterateRetainers<CountingRetainersIterator>(&entries_map);
+ histogram_entities_count += entries_map.entries_count();
+ histogram_children_count += entries_map.total_children_count();
+ histogram_retainers_count += entries_map.total_retainers_count();
+
+ // Root entry references all other entries.
+ histogram_children_count += histogram_entities_count;
+ int root_children_count = histogram_entities_count;
+ ++histogram_entities_count;
+
+ // Allocate and fill entries in the snapshot, allocate references.
+ snapshot->AllocateEntries(histogram_entities_count,
+ histogram_children_count,
+ histogram_retainers_count);
+ snapshot->AddEntry(HeapSnapshot::kInternalRootObject,
+ root_children_count,
+ 0);
+ int root_child_index = 0;
+ for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
+ if (agg_snapshot_->info()[i].bytes() > 0) {
+ AddEntryFromAggregatedSnapshot(snapshot,
+ &root_child_index,
+ HeapEntry::kInternal,
+ agg_snapshot_->info()[i].name(),
+ agg_snapshot_->info()[i].number(),
+ agg_snapshot_->info()[i].bytes(),
+ 0,
+ 0);
+ }
+ }
+ AllocatingConstructorHeapProfileIterator alloc_cons_iter(
+ snapshot, &root_child_index);
+ agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
+ AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
+ entries_map.UpdateEntries(&allocator);
+
+ // Fill up references.
+ IterateRetainers<AllocatingRetainersIterator>(&entries_map);
}
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index dac488e9fe..2ef081ee29 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -56,8 +56,8 @@ class HeapProfiler {
static void TearDown();
#ifdef ENABLE_LOGGING_AND_PROFILING
- static HeapSnapshot* TakeSnapshot(const char* name);
- static HeapSnapshot* TakeSnapshot(String* name);
+ static HeapSnapshot* TakeSnapshot(const char* name, int type);
+ static HeapSnapshot* TakeSnapshot(String* name, int type);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
@@ -75,12 +75,8 @@ class HeapProfiler {
private:
HeapProfiler();
~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name);
- HeapSnapshot* TakeSnapshotImpl(String* name);
-
- // Obsolete interface.
- // Update the array info with stats from obj.
- static void CollectStats(HeapObject* obj, HistogramInfo* info);
+ HeapSnapshot* TakeSnapshotImpl(const char* name, int type);
+ HeapSnapshot* TakeSnapshotImpl(String* name, int type);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
@@ -132,7 +128,9 @@ class JSObjectsCluster BASE_EMBEDDED {
bool is_null() const { return constructor_ == NULL; }
bool can_be_coarsed() const { return instance_ != NULL; }
String* constructor() const { return constructor_; }
+ Object* instance() const { return instance_; }
+ const char* GetSpecialCaseName() const;
void Print(StringStream* accumulator) const;
// Allows null clusters to be printed.
void DebugPrint(StringStream* accumulator) const;
@@ -179,6 +177,9 @@ class ConstructorHeapProfile BASE_EMBEDDED {
virtual ~ConstructorHeapProfile() {}
void CollectStats(HeapObject* obj);
void PrintStats();
+
+ template<class Callback>
+ void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }
// Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
virtual void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size);
@@ -282,6 +283,8 @@ class ClustersCoarser BASE_EMBEDDED {
// "retainer profile" of JS objects allocated on heap.
// It is run during garbage collection cycle, thus it doesn't need
// to use handles.
+class RetainerTreeAggregator;
+
class RetainerHeapProfile BASE_EMBEDDED {
public:
class Printer {
@@ -292,7 +295,14 @@ class RetainerHeapProfile BASE_EMBEDDED {
};
RetainerHeapProfile();
+ ~RetainerHeapProfile();
+
+ RetainerTreeAggregator* aggregator() { return aggregator_; }
+ ClustersCoarser* coarser() { return &coarser_; }
+ JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }
+
void CollectStats(HeapObject* obj);
+ void CoarseAndAggregate();
void PrintStats();
void DebugPrintStats(Printer* printer);
void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
@@ -301,6 +311,44 @@ class RetainerHeapProfile BASE_EMBEDDED {
ZoneScope zscope_;
JSObjectsRetainerTree retainers_tree_;
ClustersCoarser coarser_;
+ RetainerTreeAggregator* aggregator_;
+};
+
+
+class AggregatedHeapSnapshot {
+ public:
+ AggregatedHeapSnapshot();
+ ~AggregatedHeapSnapshot();
+
+ HistogramInfo* info() { return info_; }
+ ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
+ RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
+
+ private:
+ HistogramInfo* info_;
+ ConstructorHeapProfile js_cons_profile_;
+ RetainerHeapProfile js_retainer_profile_;
+};
+
+
+class HeapEntriesMap;
+class HeapSnapshot;
+
+class AggregatedHeapSnapshotGenerator {
+ public:
+ explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
+ void GenerateSnapshot();
+ void FillHeapSnapshot(HeapSnapshot* snapshot);
+
+ static const int kAllStringsType = LAST_TYPE + 1;
+
+ private:
+ void CalculateStringsStats();
+ void CollectStats(HeapObject* obj);
+ template<class Iterator>
+ void IterateRetainers(HeapEntriesMap* entries_map);
+
+ AggregatedHeapSnapshot* agg_snapshot_;
};
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 1d696c7a16..443c926d95 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -104,6 +104,7 @@ List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
+HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
@@ -193,6 +194,33 @@ bool Heap::HasBeenSetup() {
}
+int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
+ ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return object->SizeFromMap(map_word.ToMap());
+}
+
+
+int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
+ ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
+ ASSERT(MarkCompactCollector::are_map_pointers_encoded());
+ uint32_t marker = Memory::uint32_at(object->address());
+ if (marker == MarkCompactCollector::kSingleFreeEncoding) {
+ return kIntSize;
+ } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
+ return Memory::int_at(object->address() + kIntSize);
+ } else {
+ MapWord map_word = object->map_word();
+ Address map_address = map_word.DecodeMapAddress(Heap::map_space());
+ Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
+ return object->SizeFromMap(map);
+ }
+}
+
+
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
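
The two size functions just added are never called directly by size queries; they sit behind the gc_safe_size_of_old_object_ function pointer that the collector flips when map pointers become encoded (see MarkMapPointersAsEncoded in the heap.h hunk further down). A standalone sketch of that dispatch pattern, with placeholder types:

    #include <cassert>
    #include <cstdio>

    struct FakeObject { int size; bool map_encoded; };

    // Two phase-specific size functions share one signature...
    static int SizeOfPlain(FakeObject* obj)   { assert(!obj->map_encoded); return obj->size; }
    static int SizeOfEncoded(FakeObject* obj) { assert(obj->map_encoded);  return obj->size; }

    // ...and callers go through a pointer that is swapped once per GC phase,
    // so the hot path pays one indirect call instead of a branch.
    typedef int (*SizeCallback)(FakeObject*);
    static SizeCallback gc_safe_size = &SizeOfPlain;

    static void MarkMapPointersAsEncoded(bool encoded) {
      gc_safe_size = encoded ? &SizeOfEncoded : &SizeOfPlain;
    }

    int main() {
      FakeObject obj = {16, false};
      printf("%d\n", gc_safe_size(&obj));  // via the plain variant
      MarkMapPointersAsEncoded(true);
      obj.map_encoded = true;
      printf("%d\n", gc_safe_size(&obj));  // via the encoded variant
      return 0;
    }
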
@@ -540,6 +568,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->RelinkPageListInChunkOrder(true);
+ }
+
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -571,6 +606,22 @@ void Heap::ClearJSFunctionResultCaches() {
}
+class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor {
+ virtual void VisitThread(ThreadLocalTop* top) {
+ Context* context = top->context_;
+ if (context == NULL) return;
+ context->global()->global_context()->normalized_map_cache()->Clear();
+ }
+};
+
+
+void Heap::ClearNormalizedMapCaches() {
+ if (Bootstrapper::IsActive()) return;
+ ClearThreadNormalizedMapCachesVisitor visitor;
+ ThreadManager::IterateArchivedThreads(&visitor);
+}
+
+
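
ClearNormalizedMapCaches above leans on the archived-thread visitor machinery: a small stack-allocated visitor is handed to ThreadManager::IterateArchivedThreads, which calls it back once per thread. The shape of that pattern in isolation (placeholder names, not V8's thread manager):

    #include <cstdio>
    #include <vector>

    struct ThreadState { int id; };

    // Callback interface: one virtual hook per visited thread.
    class ThreadVisitor {
     public:
      virtual ~ThreadVisitor() {}
      virtual void VisitThread(ThreadState* state) = 0;
    };

    // The iterator owns the traversal; visitors supply only the action.
    static void IterateArchivedThreads(std::vector<ThreadState>* threads,
                                       ThreadVisitor* visitor) {
      for (size_t i = 0; i < threads->size(); i++) {
        visitor->VisitThread(&(*threads)[i]);
      }
    }

    class ClearCachesVisitor : public ThreadVisitor {
      virtual void VisitThread(ThreadState* state) {
        printf("clearing caches for thread %d\n", state->id);
      }
    };

    int main() {
      std::vector<ThreadState> threads;
      ThreadState main_thread = {1};
      threads.push_back(main_thread);
      ClearCachesVisitor visitor;
      IterateArchivedThreads(&threads, &visitor);
      return 0;
    }
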
#ifdef DEBUG
enum PageWatermarkValidity {
@@ -637,12 +688,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
int start_new_space_size = Heap::new_space()->Size();
if (collector == MARK_COMPACTOR) {
- if (FLAG_flush_code) {
- // Flush all potentially unused code.
- GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE);
- FlushCode();
- }
-
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
@@ -732,8 +777,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
MarkCompactCollector::CollectGarbage();
- MarkCompactEpilogue(is_compacting);
-
LOG(ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
@@ -755,18 +798,11 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
CompilationCache::MarkCompactPrologue();
- Top::MarkCompactPrologue(is_compacting);
- ThreadManager::MarkCompactPrologue(is_compacting);
-
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
-}
-
-void Heap::MarkCompactEpilogue(bool is_compacting) {
- Top::MarkCompactEpilogue(is_compacting);
- ThreadManager::MarkCompactEpilogue(is_compacting);
+ ClearNormalizedMapCaches();
}
@@ -1100,6 +1136,10 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
VisitSpecialized<SharedFunctionInfo::kSize>);
+ table_.Register(kVisitJSFunction,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ VisitSpecialized<JSFunction::kSize>);
+
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject,
kVisitDataObjectGeneric>();
@@ -1415,7 +1455,7 @@ bool Heap::CreateInitialMaps() {
set_meta_map(new_meta_map);
new_meta_map->set_map(new_meta_map);
- obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
+ obj = AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_fixed_array_map(Map::cast(obj));
@@ -1457,6 +1497,11 @@ bool Heap::CreateInitialMaps() {
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (obj->IsFailure()) return false;
+ set_fixed_cow_array_map(Map::cast(obj));
+ ASSERT(fixed_array_map() != fixed_cow_array_map());
+
obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
if (obj->IsFailure()) return false;
set_heap_number_map(Map::cast(obj));
@@ -1472,17 +1517,17 @@ bool Heap::CreateInitialMaps() {
roots_[entry.index] = Map::cast(obj);
}
- obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_undetectable_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
- obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
+ obj = AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_undetectable_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
- obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
+ obj = AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_byte_array_map(Map::cast(obj));
@@ -1525,7 +1570,7 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_external_float_array_map(Map::cast(obj));
- obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
+ obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_code_map(Map::cast(obj));
@@ -1549,19 +1594,19 @@ bool Heap::CreateInitialMaps() {
roots_[entry.index] = Map::cast(obj);
}
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_hash_table_map(Map::cast(obj));
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_context_map(Map::cast(obj));
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_catch_context_map(Map::cast(obj));
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_global_context_map(Map::cast(obj));
@@ -2354,109 +2399,6 @@ Object* Heap::AllocateExternalArray(int length,
}
-// The StackVisitor is used to traverse all the archived threads to see if
-// there are activations on any of the stacks corresponding to the code.
-class FlushingStackVisitor : public ThreadVisitor {
- public:
- explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {}
-
- void VisitThread(ThreadLocalTop* top) {
- // If we already found the code in a previously traversed thread, we return.
- if (found_) return;
-
- for (StackFrameIterator it(top); !it.done(); it.Advance()) {
- if (code_->contains(it.frame()->pc())) {
- found_ = true;
- return;
- }
- }
- }
- bool FoundCode() { return found_; }
-
- private:
- bool found_;
- Code* code_;
-};
-
-
-static bool CodeIsActive(Code* code) {
- // Make sure we are not referencing the code from the stack.
- for (StackFrameIterator it; !it.done(); it.Advance()) {
- if (code->contains(it.frame()->pc())) return true;
- }
- // Iterate the archived stacks in all threads to check if
- // the code is referenced.
- FlushingStackVisitor threadvisitor(code);
- ThreadManager::IterateArchivedThreads(&threadvisitor);
- if (threadvisitor.FoundCode()) return true;
- return false;
-}
-
-
-static void FlushCodeForFunction(JSFunction* function) {
- SharedFunctionInfo* shared_info = function->shared();
-
- // Special handling if the function and shared info objects
- // have different code objects.
- if (function->code() != shared_info->code()) {
- // If the shared function has been flushed but the function has not,
- // we flush the function if possible.
- if (!shared_info->is_compiled() && function->is_compiled() &&
- !CodeIsActive(function->code())) {
- function->set_code(shared_info->code());
- }
- return;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return;
-
- // We never flush code for Api functions.
- if (shared_info->IsApiFunction()) return;
-
- // Only flush code for functions.
- if (!shared_info->code()->kind() == Code::FUNCTION) return;
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) return;
-
- // If this is a full script wrapped in a function we do not flush the code.
- if (shared_info->is_toplevel()) return;
-
- // If this function is in the compilation cache we do not flush the code.
- if (CompilationCache::HasFunction(shared_info)) return;
-
- // Check stack and archived threads for the code.
- if (CodeIsActive(shared_info->code())) return;
-
- // Compute the lazy compilable version of the code.
- Code* code = Builtins::builtin(Builtins::LazyCompile);
- shared_info->set_code(code);
- function->set_code(code);
-}
-
-
-void Heap::FlushCode() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Do not flush code if the debugger is loaded or there are breakpoints.
- if (Debug::IsLoaded() || Debug::has_break_points()) return;
-#endif
- HeapObjectIterator it(old_pointer_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
-
- // The function must have a valid context and not be a builtin.
- if (function->unchecked_context()->IsContext() &&
- !function->IsBuiltin()) {
- FlushCodeForFunction(function);
- }
- }
- }
-}
-
-
Object* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference) {
@@ -2910,7 +2852,9 @@ Object* Heap::CopyJSObject(JSObject* source) {
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
- Object* elem = CopyFixedArray(elements);
+ Object* elem =
+ (elements->map() == fixed_cow_array_map()) ?
+ elements : CopyFixedArray(elements);
if (elem->IsFailure()) return elem;
JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
}
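
The CopyJSObject hunk above stops deep-copying elements arrays whose map is the new fixed_cow_array_map: a copy-on-write backing store may be aliased by the clone, since any writer is expected to copy it before mutating. The decision in scalar form (illustrative types only):

    #include <cstdio>
    #include <cstring>

    struct Elements { bool copy_on_write; int data[4]; };

    // Clone the backing store only when it is mutable; COW stores can be
    // shared safely because writers copy before the first mutation.
    static Elements* CloneElements(Elements* src) {
      if (src->copy_on_write) return src;  // alias the immutable store
      Elements* copy = new Elements;
      std::memcpy(copy, src, sizeof(Elements));
      return copy;
    }

    int main() {
      Elements cow = {true, {1, 2, 3, 4}};
      Elements mut = {false, {1, 2, 3, 4}};
      Elements* shared = CloneElements(&cow);
      Elements* copied = CloneElements(&mut);
      printf("cow aliased: %d, mutable aliased: %d\n",
             shared == &cow, copied == &mut);  // 1, 0
      delete copied;
      return 0;
    }
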
@@ -4057,8 +4001,8 @@ bool Heap::ConfigureHeapDefault() {
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
- *stats->start_marker = 0xDECADE00;
- *stats->end_marker = 0xDECADE01;
+ *stats->start_marker = HeapStats::kStartMarker;
+ *stats->end_marker = HeapStats::kEndMarker;
*stats->new_space_size = new_space_.Size();
*stats->new_space_capacity = new_space_.Capacity();
*stats->old_pointer_space_size = old_pointer_space_->Size();
@@ -4129,6 +4073,8 @@ bool Heap::Setup(bool create_heap_objects) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
+ MarkMapPointersAsEncoded(false);
+
// Setup memory allocator and reserve a chunk of memory for new
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
@@ -4815,7 +4761,6 @@ GCTracer::~GCTracer() {
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
- PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));
PrintF("total_size_before=%d ", start_size_);
PrintF("total_size_after=%d ", Heap::SizeOfObjects());
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 93b90b1846..484cd22bdf 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -30,6 +30,7 @@
#include <math.h>
+#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
@@ -55,6 +56,7 @@ namespace internal {
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
+ V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, meta_map, MetaMap) \
V(Object, termination_exception, TerminationException) \
@@ -312,61 +314,64 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateJSObject(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateGlobalObject(JSFunction* constructor);
+ MUST_USE_RESULT static Object* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- static Object* CopyJSObject(JSObject* source);
+ MUST_USE_RESULT static Object* CopyJSObject(JSObject* source);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFunctionPrototype(JSFunction* function);
+ MUST_USE_RESULT static Object* AllocateFunctionPrototype(
+ JSFunction* function);
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
// object that has been freshly allocated using the constructor.
- static Object* ReinitializeJSGlobalProxy(JSFunction* constructor,
- JSGlobalProxy* global);
+ MUST_USE_RESULT static Object* ReinitializeJSGlobalProxy(
+ JSFunction* constructor,
+ JSGlobalProxy* global);
// Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSObjectFromMap(Map* map,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateJSObjectFromMap(
+ Map* map, PretenureFlag pretenure = NOT_TENURED);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT static Object* Allocate(Map* map, AllocationSpace space);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* AllocateMap(InstanceType instance_type, int instance_size);
+ MUST_USE_RESULT static Object* AllocateMap(InstanceType instance_type,
+ int instance_size);
// Allocates a partial map for bootstrapping.
- static Object* AllocatePartialMap(InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT static Object* AllocatePartialMap(InstanceType instance_type,
+ int instance_size);
// Allocate a map for the specified function
- static Object* AllocateInitialMap(JSFunction* fun);
+ MUST_USE_RESULT static Object* AllocateInitialMap(JSFunction* fun);
// Allocates an empty code cache.
- static Object* AllocateCodeCache();
+ MUST_USE_RESULT static Object* AllocateCodeCache();
// Clear the Instanceof cache (used when a prototype changes).
static void ClearInstanceofCache() {
@@ -391,13 +396,13 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateStringFromAscii(
+ MUST_USE_RESULT static Object* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Object* AllocateStringFromUtf8(
+ MUST_USE_RESULT static Object* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Object* AllocateStringFromTwoByte(
+ MUST_USE_RESULT static Object* AllocateStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
@@ -405,16 +410,15 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static inline Object* AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT static inline Object* AllocateSymbol(Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
- static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT static Object* AllocateInternalSymbol(
+ unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
- static Object* AllocateExternalSymbol(Vector<const char> str,
- int chars);
+ MUST_USE_RESULT static Object* AllocateExternalSymbol(Vector<const char> str,
+ int chars);
// Allocates and partially initializes a String. There are two String
@@ -424,10 +428,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateRawAsciiString(
+ MUST_USE_RESULT static Object* AllocateRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- static Object* AllocateRawTwoByteString(
+ MUST_USE_RESULT static Object* AllocateRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
@@ -435,97 +439,103 @@ class Heap : public AllStatic {
// A cache is used for ascii codes.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. Please note this does not perform a garbage collection.
- static Object* LookupSingleCharacterStringFromCode(uint16_t code);
+ MUST_USE_RESULT static Object* LookupSingleCharacterStringFromCode(
+ uint16_t code);
// Allocate a byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateByteArray(int length, PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateByteArray(int length,
+ PretenureFlag pretenure);
// Allocate a non-tenured byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateByteArray(int length);
+ MUST_USE_RESULT static Object* AllocateByteArray(int length);
// Allocate a pixel array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocatePixelArray(int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocatePixelArray(int length,
+ uint8_t* external_pointer,
+ PretenureFlag pretenure);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateExternalArray(
+ int length,
+ ExternalArrayType array_type,
+ void* external_pointer,
+ PretenureFlag pretenure);
// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSGlobalPropertyCell(Object* value);
+ MUST_USE_RESULT static Object* AllocateJSGlobalPropertyCell(Object* value);
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateFixedArray(int length,
+ PretenureFlag pretenure);
// Allocates a fixed array initialized with undefined values
- static Object* AllocateFixedArray(int length);
+ MUST_USE_RESULT static Object* AllocateFixedArray(int length);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateUninitializedFixedArray(int length);
+ MUST_USE_RESULT static Object* AllocateUninitializedFixedArray(int length);
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- static Object* CopyFixedArray(FixedArray* src);
+ MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src);
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFixedArrayWithHoles(
+ MUST_USE_RESULT static Object* AllocateFixedArrayWithHoles(
int length,
PretenureFlag pretenure = NOT_TENURED);
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
- static Object* AllocateHashTable(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateHashTable(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
- static Object* AllocateGlobalContext();
+ MUST_USE_RESULT static Object* AllocateGlobalContext();
// Allocate a function context.
- static Object* AllocateFunctionContext(int length, JSFunction* closure);
+ MUST_USE_RESULT static Object* AllocateFunctionContext(int length,
+ JSFunction* closure);
// Allocate a 'with' context.
- static Object* AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context);
+ MUST_USE_RESULT static Object* AllocateWithContext(Context* previous,
+ JSObject* extension,
+ bool is_catch_context);
// Allocates a new utility object in the old generation.
- static Object* AllocateStruct(InstanceType type);
+ MUST_USE_RESULT static Object* AllocateStruct(InstanceType type);
// Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFunction(Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure = TENURED);
+ MUST_USE_RESULT static Object* AllocateFunction(
+ Map* function_map,
+ SharedFunctionInfo* shared,
+ Object* prototype,
+ PretenureFlag pretenure = TENURED);
// Indices for direct access into argument objects.
static const int kArgumentsObjectSize =
@@ -537,47 +547,52 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateArgumentsObject(Object* callee, int length);
+ MUST_USE_RESULT static Object* AllocateArgumentsObject(Object* callee,
+ int length);
// Same as NewNumberFromDouble, but may return a preallocated/immutable
// number object (e.g., minus_zero_value_, nan_value_)
- static Object* NumberFromDouble(double value,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* NumberFromDouble(
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Allocates a HeapNumber from value.
- static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
- static Object* AllocateHeapNumber(double value); // pretenure = NOT_TENURED
+ MUST_USE_RESULT static Object* AllocateHeapNumber(double value,
+ PretenureFlag pretenure);
+ // pretenure = NOT_TENURED.
+ MUST_USE_RESULT static Object* AllocateHeapNumber(double value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static inline Object* NumberFromInt32(int32_t value);
+ MUST_USE_RESULT static inline Object* NumberFromInt32(int32_t value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static inline Object* NumberFromUint32(uint32_t value);
+ MUST_USE_RESULT static inline Object* NumberFromUint32(uint32_t value);
// Allocates a new proxy object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateProxy(Address proxy,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateProxy(
+ Address proxy,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates a new SharedFunctionInfo object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateSharedFunctionInfo(Object* name);
+ MUST_USE_RESULT static Object* AllocateSharedFunctionInfo(Object* name);
// Allocates a new cons string object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateConsString(String* first, String* second);
+ MUST_USE_RESULT static Object* AllocateConsString(String* first,
+ String* second);
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
@@ -585,19 +600,20 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateSubString(
+ String* buffer,
+ int start,
+ int end,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocate a new external string object, which is backed by a string
// resource that resides outside the V8 heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateExternalStringFromAscii(
+ MUST_USE_RESULT static Object* AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
- static Object* AllocateExternalStringFromTwoByte(
+ MUST_USE_RESULT static Object* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
@@ -609,9 +625,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static inline Object* AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
+ MUST_USE_RESULT static inline Object* AllocateRaw(
+ int size_in_bytes,
+ AllocationSpace space,
+ AllocationSpace retry_space);
// Initialize a filler object to keep the ability to iterate over the heap
// when shortening objects.
@@ -623,26 +640,26 @@ class Heap : public AllStatic {
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- static Object* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference);
+ MUST_USE_RESULT static Object* CreateCode(const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference);
- static Object* CopyCode(Code* code);
+ MUST_USE_RESULT static Object* CopyCode(Code* code);
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
- static Object* CopyCode(Code* code, Vector<byte> reloc_info);
+ MUST_USE_RESULT static Object* CopyCode(Code* code, Vector<byte> reloc_info);
// Finds the symbol for string in the symbol table.
// If not found, a new symbol is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* LookupSymbol(Vector<const char> str);
- static Object* LookupAsciiSymbol(const char* str) {
+ MUST_USE_RESULT static Object* LookupSymbol(Vector<const char> str);
+ MUST_USE_RESULT static Object* LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
- static Object* LookupSymbol(String* str);
+ MUST_USE_RESULT static Object* LookupSymbol(String* str);
static bool LookupSymbolIfExists(String* str, String** symbol);
static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
@@ -657,7 +674,7 @@ class Heap : public AllStatic {
// string might stay non-flat even when a failure is not returned.
//
// Please note this function does not perform a garbage collection.
- static inline Object* PrepareForCompare(String* str);
+ MUST_USE_RESULT static inline Object* PrepareForCompare(String* str);
// Converts the given boolean condition to JavaScript boolean value.
static Object* ToBoolean(bool condition) {
@@ -817,6 +834,13 @@ class Heap : public AllStatic {
roots_[kCodeStubsRootIndex] = value;
}
+ // Support for computing object sizes for old objects during GCs. Returns
+ // a function that is guaranteed to be safe for computing object sizes in
+ // the current GC phase.
+ static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+ return gc_safe_size_of_old_object_;
+ }
+
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
static void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
@@ -856,8 +880,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* CreateSymbol(const char* str, int length, int hash);
- static Object* CreateSymbol(String* str);
+ MUST_USE_RESULT static Object* CreateSymbol(const char* str,
+ int length,
+ int hash);
+ MUST_USE_RESULT static Object* CreateSymbol(String* str);
// Write barrier support for address[offset] = o.
static inline void RecordWrite(Address address, int offset);
@@ -929,9 +955,9 @@ class Heap : public AllStatic {
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
// Allocate uninitialized fixed array.
- static Object* AllocateRawFixedArray(int length);
- static Object* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateRawFixedArray(int length);
+ MUST_USE_RESULT static Object* AllocateRawFixedArray(int length,
+ PretenureFlag pretenure);
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
@@ -974,8 +1000,9 @@ class Heap : public AllStatic {
kRootListLength
};
- static Object* NumberToString(Object* number,
- bool check_number_string_cache = true);
+ MUST_USE_RESULT static Object* NumberToString(
+ Object* number,
+ bool check_number_string_cache = true);
static Map* MapForExternalArrayType(ExternalArrayType array_type);
static RootListIndex RootIndexForExternalArrayType(
@@ -1020,6 +1047,8 @@ class Heap : public AllStatic {
static void ClearJSFunctionResultCaches();
+ static void ClearNormalizedMapCaches();
+
static GCTracer* tracer() { return tracer_; }
private:
@@ -1168,6 +1197,18 @@ class Heap : public AllStatic {
static GCCallback global_gc_prologue_callback_;
static GCCallback global_gc_epilogue_callback_;
+ // Support for computing object sizes during GC.
+ static HeapObjectCallback gc_safe_size_of_old_object_;
+ static int GcSafeSizeOfOldObject(HeapObject* object);
+ static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
+
+ // Update the GC state. Called from the mark-compact collector.
+ static void MarkMapPointersAsEncoded(bool encoded) {
+ gc_safe_size_of_old_object_ = encoded
+ ? &GcSafeSizeOfOldObjectWithEncodedMap
+ : &GcSafeSizeOfOldObject;
+ }
+
// Checks whether a global GC is necessary
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
@@ -1180,10 +1221,10 @@ class Heap : public AllStatic {
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
- static inline Object* AllocateRawMap();
+ MUST_USE_RESULT static inline Object* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
- static inline Object* AllocateRawCell();
+ MUST_USE_RESULT static inline Object* AllocateRawCell();
// Initializes a JSObject based on its map.
static void InitializeJSObjectFromMap(JSObject* obj,
@@ -1221,7 +1262,6 @@ class Heap : public AllStatic {
// Code to be run before and after mark-compact.
static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
@@ -1245,9 +1285,10 @@ class Heap : public AllStatic {
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
- static inline Object* InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
+ MUST_USE_RESULT static inline Object* InitializeFunction(
+ JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype);
static GCTracer* tracer_;
@@ -1257,10 +1298,6 @@ class Heap : public AllStatic {
// Flush the number to string cache.
static void FlushNumberStringCache();
- // Flush code from functions we do not expect to use again. The code will
- // be replaced with a lazy compilable version.
- static void FlushCode();
-
static void UpdateSurvivalRateTrend(int start_new_space_size);
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
@@ -1317,11 +1354,15 @@ class Heap : public AllStatic {
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
+ friend class MarkCompactCollector;
};
class HeapStats {
public:
+ static const int kStartMarker = 0xDECADE00;
+ static const int kEndMarker = 0xDECADE01;
+
int* start_marker; // 0
int* new_space_size; // 1
int* new_space_capacity; // 2
@@ -1861,7 +1902,7 @@ class TranscendentalCache {
// Returns a heap number with f(input), where f is a math function specified
// by the 'type' argument.
- static inline Object* Get(Type type, double input) {
+ MUST_USE_RESULT static inline Object* Get(Type type, double input) {
TranscendentalCache* cache = caches_[type];
if (cache == NULL) {
caches_[type] = cache = new TranscendentalCache(type);
@@ -1874,7 +1915,7 @@ class TranscendentalCache {
static void Clear();
private:
- inline Object* Get(double input) {
+ MUST_USE_RESULT inline Object* Get(double input) {
Converter c;
c.dbl = input;
int hash = Hash(c);
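
The dominant change in heap.h above is mechanical: every allocator now carries MUST_USE_RESULT, because a discarded return value silently drops a Failure::RetryAfterGC signal. A standalone sketch, assuming the usual warn_unused_result expansion (V8 defines the real macro in its own headers; this definition is an assumption for illustration):

    #include <cstdio>

    #if defined(__GNUC__)
    #define MUST_USE_RESULT __attribute__((warn_unused_result))
    #else
    #define MUST_USE_RESULT
    #endif

    // A failed allocation is reported only through the return value, so a
    // caller that ignores it never runs its retry-after-GC logic.
    MUST_USE_RESULT static int Allocate(int bytes) {
      return bytes > 1024 ? -1 : bytes;  // -1 plays the role of a Failure
    }

    int main() {
      // Allocate(2048);            // with GCC/Clang: warns, result ignored
      int result = Allocate(512);   // fine: the caller inspects the result
      if (result < 0) {
        printf("retry after GC\n");
      } else {
        printf("allocated %d bytes\n", result);
      }
      return 0;
    }
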
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 35a90a4aca..a095ef7bf4 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "code-stubs.h"
#include "codegen-inl.h"
namespace v8 {
@@ -95,10 +96,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc+1.
- __ mov(Operand(esp, eax, times_4, kPointerSize), edi);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -567,9 +564,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
__ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
@@ -700,17 +696,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
- __ mov(result,
- Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
// Number of empty elements to allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
@@ -1100,7 +1085,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
Label generic_array_code;
// Get the Array function.
- GenerateLoadArrayFunction(masm, edi);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
@@ -1136,7 +1121,7 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// The array construct code is only set for the builtin Array function which
// does always have a map.
- GenerateLoadArrayFunction(masm, ebx);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx);
__ cmp(edi, Operand(ebx));
__ Assert(equal, "Unexpected Array function");
// Initial map for the builtin Array function should be a map.
@@ -1160,6 +1145,131 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ __ IncrementCounter(&Counters::string_ctor_calls, 1);
+
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
+ __ cmp(edi, Operand(ecx));
+ __ Assert(equal, "Unexpected String function");
+ }
+
+ // Load the first argument into eax and get rid of the rest
+ // (including the receiver).
+ Label no_arguments;
+ __ test(eax, Operand(eax));
+ __ j(zero, &no_arguments);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ push(ecx);
+ __ mov(eax, ebx);
+
+ // Lookup the argument in the number to string cache.
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ false, // Input is known to be smi?
+ &not_cached);
+ __ IncrementCounter(&Counters::string_ctor_cached_number, 1);
+ __ bind(&argument_is_string);
+ // ----------- S t a t e -------------
+ // -- ebx : argument converted to string
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into eax.
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
+ JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(equal, "Unexpected string wrapper instance size");
+ __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
+ __ Assert(equal, "Unexpected unused properties of string wrapper");
+ }
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+
+ // Set properties and elements.
+ __ Set(ecx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+
+ // Set the value.
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ mov(ebx, eax);
+ __ IncrementCounter(&Counters::string_ctor_string_value, 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into ebx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(&Counters::string_ctor_conversions, 1);
+ __ EnterInternalFrame();
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ __ LeaveInternalFrame();
+ __ mov(ebx, eax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into ebx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ Set(ebx, Immediate(Factory::empty_string()));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ push(ecx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(&Counters::string_ctor_gc_required, 1);
+ __ EnterInternalFrame();
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ ret(0);
+}
+
+
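
Generate_StringConstructCode just above hand-assembles what `new String(value)` does: take the first argument (or the empty string when there is none), obtain its string form through the number-to-string cache or the TO_STRING builtin, then box it in a JSValue wrapper. A rough C++ model of that control flow (placeholder types and cache, nothing here is V8 API):

    #include <cstdio>
    #include <map>
    #include <string>

    static std::map<double, std::string> number_string_cache;

    struct StringWrapper { std::string value; };  // stands in for JSValue

    static std::string ToStringValue(double number) {
      std::map<double, std::string>::iterator it =
          number_string_cache.find(number);
      if (it != number_string_cache.end()) return it->second;  // cache hit
      char buf[32];
      snprintf(buf, sizeof(buf), "%g", number);  // the "conversion builtin"
      number_string_cache[number] = buf;
      return buf;
    }

    // new String(x) with a numeric argument: convert, then wrap.
    static StringWrapper NewStringWrapper(double number) {
      StringWrapper wrapper = {ToStringValue(number)};
      return wrapper;
    }

    int main() {
      printf("%s\n", NewStringWrapper(42).value.c_str());  // converts
      printf("%s\n", NewStringWrapper(42).value.c_str());  // cache hit
      return 0;
    }
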
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, Operand(esp));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
new file mode 100644
index 0000000000..366b91ef60
--- /dev/null
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -0,0 +1,4615 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
+#include "code-stubs.h"
+#include "bootstrapper.h"
+#include "jsregexp.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in esi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function info from the stack.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+ __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(Factory::the_hole_value()));
+ __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
+ __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
+ __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(ecx); // Temporarily remove return address.
+ __ pop(edx);
+ __ push(esi);
+ __ push(edx);
+ __ push(ecx); // Restore return address.
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
+
+
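
FastNewClosureStub above builds a JSFunction by pointing a fresh object at an existing SharedFunctionInfo and caching the code entry address directly in the closure; Generate_FunctionCall in builtins-ia32.cc now reads that cached entry through kCodeEntryOffset instead of recomputing it from the code object. The relationship, roughly, in struct form (placeholder layout, not V8's object model):

    #include <cstddef>
    #include <cstdio>

    struct SharedFunctionInfo { const void* code; int formal_parameter_count; };

    // One shared info, many closures: each closure caches the code entry so
    // a call site needs no extra indirection through the shared object.
    struct JSFunction {
      SharedFunctionInfo* shared;
      const void* code_entry;  // copied from shared->code at creation time
      void* context;
    };

    static JSFunction NewClosure(SharedFunctionInfo* shared, void* context) {
      JSFunction fn = {shared, shared->code, context};
      return fn;
    }

    int main() {
      static const char kStubCode[] = "machine code";
      SharedFunctionInfo shared = {kStubCode, 0};
      JSFunction fn = NewClosure(&shared, NULL);
      printf("entry cached: %d\n", fn.code_entry == shared.code);  // 1
      return 0;
    }
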
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+
+ // Setup the fixed slots.
+ __ xor_(ebx, Operand(ebx)); // Set to NULL.
+ __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
+
+ // Copy the global object from the surrounding context. We go through the
+ // context in the function (ecx) to match the allocation behavior we have
+ // in the runtime system (see Heap::AllocateFunctionContext).
+ __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
+ __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+
+ // Initialize the rest of the slots to undefined.
+ __ mov(ebx, Factory::undefined_value());
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ mov(esi, Operand(eax));
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + kPointerSize]: constant elements.
+ // [esp + (2 * kPointerSize)]: literal index.
+ // [esp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Factory::undefined_value());
+ __ j(equal, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Handle<Map> expected_map;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map = Factory::fixed_array_map();
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map = Factory::fixed_cow_array_map();
+ }
+ __ push(ecx);
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
+ __ Assert(equal, message);
+ __ pop(ecx);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ lea(edx, Operand(eax, JSArray::kSize));
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(edx, i), ebx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ __ fldz();
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in eax.
+ __ bind(&true_result);
+ __ mov(eax, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ mov(eax, 0);
+ __ ret(1 * kPointerSize);
+}
+
+
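
ToBooleanStub::Generate above encodes JavaScript's truthiness rules for the cases the inlined fast paths don't cover (smis, booleans and undefined are handled elsewhere, per the note). The same decision table in plain C++ (illustrative tags, not V8's object layout):

    #include <cmath>
    #include <cstdio>
    #include <string>

    enum Kind { kNull, kUndetectable, kJSObject, kString, kHeapNumber };

    struct Value { Kind kind; std::string str; double num; };

    // Mirrors the stub's order of checks: null and undetectable objects are
    // false, other JS objects are true, strings test their length, and heap
    // numbers are false only for +0, -0 and NaN.
    static bool ToBoolean(const Value& v) {
      switch (v.kind) {
        case kNull:         return false;
        case kUndetectable: return false;
        case kJSObject:     return true;
        case kString:       return !v.str.empty();
        case kHeapNumber:   return v.num != 0 && !std::isnan(v.num);
      }
      return false;
    }

    int main() {
      Value s = {kString, "", 0};
      Value n = {kHeapNumber, "", -0.0};
      printf("empty string: %d, minus zero: %d\n", ToBoolean(s), ToBoolean(n));
      return 0;
    }
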
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ mov(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ mov(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // Order of moves important to avoid destroying left argument.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ mov(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // Order of moves important to avoid destroying right argument.
+ __ mov(right_arg, right);
+ __ mov(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(Immediate(right));
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (left.is(left_arg)) {
+ __ mov(right_arg, Immediate(right));
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ mov(left_arg, Immediate(right));
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
+ __ mov(left_arg, left);
+ __ mov(right_arg, Immediate(right));
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(Immediate(left));
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (right.is(right_arg)) {
+ __ mov(left_arg, Immediate(left));
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ mov(right_arg, Immediate(left));
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ mov(right_arg, right);
+ __ mov(left_arg, Immediate(left));
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
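
The three GenerateCall overloads above all solve the same small problem: place two values into the fixed argument registers edx/eax without clobbering one operand while moving the other, swapping (or reversing, for commutative ops) when sources and targets overlap. The same hazard in scalar form:

    #include <cstdio>
    #include <utility>

    // Move (*left, *right) into two fixed "registers" when either source may
    // already occupy a target. The order of moves matters exactly as in the
    // stub: evacuate the value sitting in a target slot first, or swap.
    static void PlaceArgs(int* left_reg, int* right_reg, int* left, int* right) {
      if (left == left_reg && right == right_reg) return;  // already placed
      if (left == right_reg && right == left_reg) {        // crossed: swap
        std::swap(*left_reg, *right_reg);
        return;
      }
      if (left == right_reg) {   // left occupies right's target:
        *left_reg = *left;       // move it out first...
        *right_reg = *right;     // ...then fill the freed slot
      } else {
        *right_reg = *right;     // safe order for the remaining cases
        *left_reg = *left;
      }
    }

    int main() {
      int edx = 0, eax = 0, a = 1, b = 2;
      PlaceArgs(&edx, &eax, &a, &b);
      printf("edx=%d eax=%d\n", edx, eax);  // edx=1 eax=2
      return 0;
    }
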
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
+ // Code pattern for loading a floating point value. The input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // the operand is in register |number|. Returns the operand as a floating
+ // point number on the FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+ // Similar to LoadFloatOperands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in eax, operand_2 in edx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch);
+
+ // Takes the operands in edx and eax and loads them as integers in eax
+ // and ecx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadNumbersAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+
+ // Test if operands are smis or heap numbers and load them
+ // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm);
+
+ // Test if operands are numbers (smi or HeapNumber objects), and load
+ // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
+ // either operand is not a number. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+};
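+
+ // Typical SSE2 usage of FloatingPointHelper (a sketch, mirroring the
+ // ADD fast path generated further down):
+ //   FloatingPointHelper::LoadSSE2Operands(masm, &not_numbers);
+ //   __ addsd(xmm0, xmm1);
+ //   __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);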
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = eax;
+ right = ebx;
+ if (HasArgsInRegisters()) {
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ mov(right, Operand(esp, 1 * kPointerSize));
+ __ mov(left, Operand(esp, 2 * kPointerSize));
+ }
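+ // For DIV/MOD with register arguments, the moves above rotate the
+ // incoming pair (edx = left, eax = right) into (eax = left, ebx = right),
+ // leaving edx free for the edx:eax dividend used by idiv.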
+
+ if (static_operands_type_.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ __ or_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_AND) {
+ __ and_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_XOR) {
+ __ xor_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ }
+ }
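+ // The known-smi fast path above operates directly on the tagged values:
+ // with kSmiTag == 0 a smi is its value shifted left by one, and
+ // (a << 1) op (b << 1) == (a op b) << 1 for op in {|, &, ^}, so the
+ // result is already a correctly tagged smi.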
+
+ // 2. Prepare the smi check of both operands by oring them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
+ switch (op_) {
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
+ combined = right;
+ break;
+
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ __ mov(combined, right);
+ __ or_(combined, Operand(left));
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation, use eax
+ // for the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left));
+ combined = right;
+ break;
+
+ default:
+ break;
+ }
+
+ // 3. Perform the smi check of the operands.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ test(combined, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis, not_taken);
+
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis, not_taken);
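+ // (left - 0xc0000000 is negative exactly for untagged values in
+ // 0x40000000..0xbfffffff, i.e. results outside the smi range
+ // [-2^30, 2^30 - 1].)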
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when smi tagging.
+ // These two cases can only happen with shifts by 0 or 1 when handed a
+ // valid smi.
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, Operand(left)); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ break;
+
+ case Token::SUB:
+ __ sub(left, Operand(right));
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ __ mov(eax, left);
+ break;
+
+ case Token::MUL:
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi
+ // so save the right operand.
+ __ mov(ebx, right);
+ // Remove tag from one of the operands (but keep sign).
+ __ SmiUntag(right);
+ // Do multiplication.
+ __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+ break;
+
+ case Token::DIV:
+ // We can't revert the division if the result is not a smi so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &use_fp_on_smis, not_taken);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by the
+ // idiv instruction.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(eax, 0x40000000);
+ __ j(equal, &use_fp_on_smis);
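+ // (-2^30 / -1 = 2^30, whose untagged value 0x40000000 does not fit
+ // in a smi.)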
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+ // Check that the remainder is zero.
+ __ test(edx, Operand(edx));
+ __ j(not_zero, &use_fp_on_smis);
+ // Tag the result and store it in register eax.
+ __ SmiTag(eax);
+ break;
+
+ case Token::MOD:
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &not_smis, not_taken);
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
+ // Move remainder to register eax.
+ __ mov(eax, edx);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // 5. Emit return of result in eax.
+ GenerateReturn(masm);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::SHL: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // It's OK to overwrite the right argument on the stack because we
+ // are about to return.
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ break;
+ }
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
+ switch (op_) {
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, Operand(left));
+ break;
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, Operand(right));
+ break;
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
+ break;
+ }
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ GenerateReturn(masm);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op_) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+
+ // Generate fast case smi code if requested. This flag is set when the fast
+ // case smi code is not generated by the caller. Generating it here will speed
+ // up common operations.
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx);
+ __ AbortIfNotNumber(eax);
+ }
+ if (static_operands_type_.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(edx);
+ __ AbortIfNotSmi(eax);
+ }
+ FloatingPointHelper::LoadSSE2Smis(masm, ecx);
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm);
+ }
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ } else { // SSE2 not available, use FPU.
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx);
+ __ AbortIfNotNumber(eax);
+ }
+ } else {
+ FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+ }
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
+ }
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // Try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm,
+ static_operands_type_,
+ use_sse3_,
+ &call_runtime);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
+ } else {
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result);
+ }
+ // Tag smi result and return.
+ __ SmiTag(eax);
+ GenerateReturn(masm);
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result. If arguments were passed in registers, place them on the
+ // stack in the correct order below the return address.
+
+ // Avoid hitting the string ADD code below when allocation fails in
+ // the floating point code above.
+ if (op_ != Token::ADD) {
+ __ bind(&call_runtime);
+ }
+
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+
+ // If this stub has already generated FP-specific code, then the
+ // arguments are already in edx and eax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+ if (HasArgsReversed()) {
+ lhs = eax;
+ rhs = edx;
+ } else {
+ lhs = edx;
+ rhs = eax;
+ }
+
+ // Test if left operand is a string.
+ Label lhs_not_string;
+ __ test(lhs, Immediate(kSmiTagMask));
+ __ j(zero, &lhs_not_string);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &lhs_not_string);
+
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ __ TailCallStub(&string_add_left_stub);
+
+ Label call_runtime_with_args;
+ // Left operand is not a string, test right.
+ __ bind(&lhs_not_string);
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &call_runtime_with_args);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime_with_args);
+
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ __ TailCallStub(&string_add_right_stub);
+
+ // Neither argument is a string.
+ __ bind(&call_runtime);
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+ __ bind(&call_runtime_with_args);
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten, losing one of the arguments, as we
+ // are done and will not need it any more.
+ __ mov(edx, Operand(ebx));
+ __ bind(&skip_allocation);
+ // Use the object in edx as the result holder.
+ __ mov(eax, Operand(edx));
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten, losing one of the arguments, as we
+ // are done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ // If arguments are not passed in registers, read them from the stack.
+ ASSERT(!HasArgsInRegisters());
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers, remove them from the stack
+ // before returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(ecx);
+ if (HasArgsReversed()) {
+ __ push(eax);
+ __ push(edx);
+ } else {
+ __ push(edx);
+ __ push(eax);
+ }
+ __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ __ pop(ecx); // Save return address.
+
+ // Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+ __ push(ecx); // Push return address.
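+ // Stack layout now, from the top (a sketch): return address, type info,
+ // op, MinorKey, right operand, left operand; these are the five
+ // arguments expected by IC::kBinaryOp_Patch.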
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // esp[4]: argument (should be a number).
+ // esp[0]: return address.
+ // Test that eax is a number.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the low and high words of the double into ebx, edx.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ sar(eax, 1);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ mov(Operand(esp, 0), eax);
+ __ fild_s(Operand(esp, 0));
+ __ fst_d(Operand(esp, 0));
+ __ pop(edx);
+ __ pop(ebx);
+ __ jmp(&loaded);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // low and high words into ebx, edx.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+
+ __ bind(&loaded);
+ // ST[0] == double value
+ // ebx = low 32 bits of double value
+ // edx = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ mov(ecx, ebx);
+ __ xor_(ecx, Operand(edx));
+ __ mov(eax, ecx);
+ __ sar(eax, 16);
+ __ xor_(ecx, Operand(eax));
+ __ mov(eax, ecx);
+ __ sar(eax, 8);
+ __ xor_(ecx, Operand(eax));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // ebx = low 32 bits of double value.
+ // edx = high 32 bits of double value.
+ // ecx = TranscendentalCache::hash(double value).
+ __ mov(eax,
+ Immediate(ExternalReference::transcendental_cache_array_address()));
+ // Eax points to cache array.
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // Eax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+ // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ lea(ecx, Operand(eax, ecx, times_4, 0));
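+ // (First lea: ecx = ecx + 2 * ecx = 3 * ecx; second lea:
+ // ecx = eax + 4 * (3 * ecx), the address of 12-byte entry ecx.)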
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmp(ebx, Operand(ecx, 0));
+ __ j(not_equal, &cache_miss);
+ __ cmp(edx, Operand(ecx, kIntSize));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ mov(eax, Operand(ecx, 2 * kIntSize));
+ __ fstp(0);
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ // We are short on registers, so use no_reg as scratch.
+ // This gives slightly larger code.
+ __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+ GenerateOperation(masm);
+ __ mov(Operand(ecx, 0), ebx);
+ __ mov(Operand(ecx, kIntSize), edx);
+ __ mov(Operand(ecx, 2 * kIntSize), eax);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+ // Only free register is edi.
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If the argument is outside the range -2^63..2^63, fsin/fcos do not
+ // work, so we must first reduce it to the appropriate range.
+ __ mov(edi, edx);
+ __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ int supported_exponent_limit =
+ (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
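+ // A double whose biased exponent field is at least 63 + kExponentBias
+ // has magnitude >= 2^63, outside the domain fsin/fcos can handle.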
+ __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ j(below, &in_range, taken);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmp(Operand(edi), Immediate(0x7ff00000));
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, taken);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ // NaN is represented by 0x7ff8000000000000.
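+ // (The two pushes below build its little-endian image in memory:
+ // low word 0 at esp, high word 0x7ff80000 at esp + 4.)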
+ __ push(Immediate(0x7ff80000));
+ __ push(Immediate(0));
+ __ fld_d(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ mov(edi, eax); // Save eax before using fnstsw_ax.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear the FPU exception flags if the Invalid Operation or Zero
+ // Divide exceptions (status word mask 5) are set.
+ __ test(Operand(eax), Immediate(5));
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ test(Operand(eax), Immediate(0x400 /* C2 */));
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ __ fstp(0);
+ __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
+
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
+
+
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+ return;
+ }
+ if (!type_info.IsInteger32() || !use_sse3) {
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ }
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ if (!type_info.IsInteger32()) {
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ }
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just or'ed in the implicit bit, which takes care of one bit;
+ // and since we want to use the full unsigned range, we subtract one more
+ // bit from the shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits of the low
+ // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0,
+ // i.e., it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
+
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of
+ // the word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ mov(ecx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
+ }
+}
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ if (!type_info.IsDouble()) {
+ if (!type_info.IsSmi()) {
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(edx);
+ }
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+ }
+
+ __ bind(&arg1_is_object);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ if (!type_info.IsDouble()) {
+ // Test if arg2 is a Smi.
+ if (!type_info.IsSmi()) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ }
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+ }
+
+ __ bind(&arg2_is_object);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ // Test if arg1 is a Smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(edx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg1);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm,
+ edx,
+ TypeInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+
+ // Test if arg2 is a Smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(ecx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg2);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm,
+ eax,
+ TypeInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ if (type_info.IsNumber()) {
+ LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
+ } else {
+ LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
+ }
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ test(number, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi, not_taken);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ SmiUntag(number);
+ __ push(number);
+ __ fild_s(Operand(esp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
+ Label load_smi_edx, load_eax, load_smi_eax, done;
+ // Load operand in edx into xmm0.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+ // Load operand in edx into xmm0, or branch to not_numbers.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, not_numbers); // Argument in edx is not a number.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1, or branch to not_numbers.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(equal, &load_float_eax);
+ __ jmp(not_numbers); // Argument in eax is not a number.
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+ __ jmp(&done);
+ __ bind(&load_float_eax);
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_1, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_2, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_1);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch) {
+ Label test_other, done;
+ // Test that both operands are numbers (smi or heap number); jump to
+ // the non_float label if either is not.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &test_other, not_taken); // argument in edx is OK
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in edx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done); // argument in eax is OK
+ __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in eax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
+
+ if (negative_zero_ == kStrictNegativeZero) {
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
+ }
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ Label undo;
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(no_overflow, &done, taken);
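+ // (The subtraction overflows only for the minimum smi -2^30, whose
+ // tagged representation is 0x80000000; that case falls through to
+ // the slow case below.)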
+
+ // Restore eax and go slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+ __ jmp(&slow);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow, not_taken);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm,
+ eax,
+ TypeInfo::Unknown(),
+ CpuFeatures::IsSupported(SSE3),
+ &slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, not_taken);
+
+ // Tag the result as a smi and we're done.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ jmp(&done);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (overwrite_ == UNARY_NO_OVERWRITE) {
+ // Allocate a fresh heap number, but don't overwrite eax until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in eax.
+ __ AllocateHeapNumber(ebx, edx, edi, &slow);
+ __ mov(eax, Operand(ebx));
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(ecx); // pop return address.
+ __ push(eax);
+ __ push(ecx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in edx and the parameter count is in eax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register eax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(edx, Operand(eax));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebp, eax, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
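+ // (Address read: ebp + count * kPointerSize - index * kPointerSize
+ // + kDisplacement; the smi-tagged count and index scale by times_2
+ // into value * kPointerSize.)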
+ __ ret(0);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(edx, Operand(ecx));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(ebx); // Return address.
+ __ push(edx);
+ __ push(ebx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // Try the new space allocation. Start out by computing the size of the
+ // arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &add_arguments_object);
+ __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
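+ // (ecx now holds Heap::kArgumentsObjectSize plus, when there are
+ // arguments, FixedArray::kHeaderSize + count * kPointerSize; the
+ // smi-tagged count scaled by times_2 gives count * kPointerSize.)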
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(edi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
+ __ mov(ebx, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
+ __ add(Operand(edi), Immediate(kPointerSize));
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if native RegExp is not selected at compile
+ // time or if the regexp entry in generated code has been turned off by a
+ // runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &runtime, not_taken);
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+  // Check that the number of captures fits in the static offsets vector
+  // buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2. This
+  // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
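+  // For example, a regexp with one capture stores Smi::FromInt(1) here,
+  // whose raw value is 2; adding 2 gives the 4 registers needed: start and
+  // end offsets for the whole match plus start and end for the capture.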
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ j(above, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the second argument is a string.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to ebx.
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+
+ // ebx: Length of subject string as a smi
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ cmp(eax, Operand(ebx));
+ __ j(above_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, Operand(eax));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ and_(ebx,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
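+  // For example, flattening rewrites a cons string in place so that its
+  // first part holds the sequential contents and its second part is the
+  // empty string; reading just the first part is then sufficient.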
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+ __ cmp(Operand(edx), Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // eax: first part of cons string.
+ // ebx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask);
+ __ j(not_zero, &runtime);
+
+ __ bind(&seq_ascii_string);
+ // eax: subject string (flat ascii)
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(edi, Immediate(1)); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // eax: subject string (flat two byte)
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(edi, Immediate(0)); // Type is two byte.
+
+ __ bind(&check_code);
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
+ __ CmpObjectType(edx, CODE_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // eax: subject string
+ // edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two_byte)
+  // Load the used arguments before starting to push arguments for the call
+  // to the native RegExp code, to avoid handling a changing stack height.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ SmiUntag(ebx); // Previous index from smi.
+
+ // eax: subject string
+ // ebx: previous index
+ // edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two_byte)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ mov(Operand(esp, 5 * kPointerSize), ecx);
+
+ // Argument 5: static offsets vector buffer.
+ __ mov(Operand(esp, 4 * kPointerSize),
+ Immediate(ExternalReference::address_of_static_offsets_vector()));
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ test(edi, Operand(edi));
+ __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+ __ j(zero, &setup_two_byte);
+ __ SmiUntag(edi);
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+ __ jmp(&setup_rest);
+
+ __ bind(&setup_two_byte);
+ STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);  // edi is a smi (value multiplied by 2).
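+  // Since a smi is twice its value, addressing the smi-tagged length with
+  // times_1 below already covers length * 2 bytes, which is exactly the
+  // size of the two-byte character data.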
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+
+ __ bind(&setup_rest);
+
+ // Argument 2: Previous index.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+ // Argument 1: Subject string.
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+
+ // Locate the code entry and call it.
+ __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(edx, kRegExpExecuteArguments);
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+ __ j(equal, &success, taken);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure, taken);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+  // If not exception, it can only be retry. Handle that in the runtime system.
+  __ j(not_equal, &runtime);
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code but
+  // the exception has not been created yet. Handle that in the runtime system.
+  // TODO(592): Rerun the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ cmp(eax, Operand::StaticVariable(pending_exception));
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(Operand(eax), Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+  // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(Operand(edx), Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
+ __ SmiTag(edi);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+ __ mov(number_string_cache,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ __ sub(Operand(mask), Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
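+  // For example, the smi 42 hashes to 42, and a heap number whose double
+  // bits are hi:lo hashes to hi ^ lo; the entry index is hash & mask, with
+  // each entry occupying two consecutive slots (number, then string).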
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ if (object_is_smi) {
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ } else {
+    Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smi);
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ jmp(&smi_hash_calculated);
+ __ bind(&not_smi);
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ Register probe = mask;
+ __ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(probe, Immediate(kSmiTagMask));
+ __ j(zero, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ } else {
+ __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ __ FCmp();
+ }
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ mov(ebx, Operand(esp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Identical objects can be compared fast, but there are some tricky cases
+ // for NaN and undefined.
+ {
+ Label not_identical;
+ __ cmp(eax, Operand(edx));
+ __ j(not_equal, &not_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, &check_for_nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ } else {
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
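+      // For example, with kQuietNaNHighBitsMask covering the exponent bits
+      // and the top mantissa bit, the quiet NaN high word 0x7FF80000 doubles
+      // to 0xFFF00000 and compares above_equal below, while Infinity's
+      // 0x7FF00000 doubles to 0xFFE00000 and does not.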
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc_ == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ } else {
+ Label nan;
+ __ j(above_equal, &nan);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ }
+ }
+
+ __ bind(&not_identical);
+ }
+
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
+ Label slow; // Fallthrough label.
+ Label not_smis;
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+    // ecx still holds eax & kSmiTagMask, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
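+    // This is a branchless select: ecx is all ones when eax is the smi
+    // ((eax & 1) - 1 == -1) and zero otherwise, so the sequence computes
+    // ebx = eax ^ ((eax ^ edx) & ecx), i.e. always the non-smi operand.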
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+    // If either operand is a JSObject or an oddball value, then they are not
+    // equal since their pointers are different.
+    // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
+
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, Operand(ecx));
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, Operand(ecx));
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, not_taken);
+ __ j(above, &above_label, not_taken);
+
+ __ xor_(eax, Operand(eax));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+ }
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
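+    // For example, with tag bits 0 (smi) and 1 (heap object): smi + object
+    // gives low bit 0 + 1 = 1, while object + object gives 1 + 1 = 2, i.e.
+    // low bit 0, so a single test classifies both operands at once.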
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+    __ ret(0);
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address.
+ __ pop(ecx);
+ __ push(edx);
+ __ push(eax);
+
+  // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ }
+
+ // Restore return address on the stack.
+ __ push(ecx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(eax);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(eax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &receiver_is_value, not_taken);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+  // Go to the slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow, not_taken);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
+ __ Set(eax, Immediate(argc_));
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // eax holds the exception.
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Restore next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // Remove state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of
+ // a JS entry frame.
+ __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
+ Label skip;
+ __ cmp(ebp, 0);
+ __ j(equal, &skip, not_taken);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ ret(0);
+}
+
+
+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly. If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_BSD_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label empty_handle;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(kStackSpace, kArgc);
+ STATIC_ASSERT(kArgc == 4);
+ if (kPassHandlesDirectly) {
+    // When handles are passed directly we don't have to allocate extra
+    // space for and pass an out parameter.
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
+ } else {
+ // The function expects three arguments to be passed but we allocate
+ // four to get space for the output cell. The argument slots are filled
+ // as follows:
+ //
+ // 3: output cell
+ // 2: arguments pointer
+ // 1: name
+ // 0: pointer to the output cell
+ //
+ // Note that this is one more "argument" than the function expects
+ // so the out cell will have to be popped explicitly after returning
+ // from the function.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
+ __ mov(ebx, esp);
+ __ add(Operand(ebx), Immediate(3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
+ }
+ // Call the api function!
+ __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(Factory::the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception, not_taken);
+ if (!kPassHandlesDirectly) {
+ // The returned value is a pointer to the handle holding the result.
+ // Dereference this to get to the location.
+ __ mov(eax, Operand(eax, 0));
+ }
+ // Check if the result handle holds 0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &empty_handle, not_taken);
+ // It was non-zero. Dereference to get the result value.
+ __ mov(eax, Operand(eax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame();
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ bind(&empty_handle);
+ // It was zero; the result is undefined.
+ __ mov(eax, Factory::undefined_value());
+ __ jmp(&prologue);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // eax: result parameter for PerformGC, if any
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: pointer to the first argument (C callee-saved)
+
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ if (do_gc) {
+ // Pass failure code returned from last attempt as first argument to
+ // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+ // stack alignment is known to be correct. This function takes one argument
+ // which is passed on the stack, and we know that the stack has been
+ // prepared to pass at least one argument.
+ __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
+ __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ inc(Operand::StaticVariable(scope_depth));
+ }
+
+ // Call C function.
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ call(Operand(ebx));
+ // Result is in eax or edx:eax - do not destroy these registers!
+
+ if (always_allocate_scope) {
+ __ dec(Operand::StaticVariable(scope_depth));
+ }
+
+ // Make sure we're not trying to return 'the hole' from the runtime
+ // call as this may lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &okay);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ __ lea(ecx, Operand(eax, 1));
+ // Lower 2 bits of ecx are 0 iff eax has failure tag.
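+  // For example, a failure has low bits 0b11, so eax + 1 carries out of both
+  // bits and leaves them clear; a smi (low bit 0) or a heap object (low bits
+  // 01) keeps at least one of the two bits set after the increment.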
+ __ test(ecx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned, not_taken);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame();
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry, taken);
+
+ // Special handling of out of memory exceptions.
+ __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ mov(eax, Operand::StaticVariable(pending_exception_address));
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+  // Special handling of termination exceptions, which are uncatchable
+  // by JavaScript code.
+ __ cmp(eax, Factory::termination_exception());
+ __ j(equal, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ mov(esp, Operand(esp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(eax, false);
+ __ mov(Operand::StaticVariable(external_caught), eax);
+
+ // Set pending exception and eax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ }
+
+ // Clear the context pointer.
+ __ xor_(esi, Operand(esi));
+
+ // Restore fp from handler and discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // State.
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ ret(0);
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // eax: number of arguments including receiver
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // esi: current context (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects instead
+ // of a proper result. The builtin entry handles this by performing
+ // a garbage collection and retrying the builtin (twice).
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // eax: result parameter for PerformGC, if any (setup below)
+ // ebx: pointer to builtin function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: argv pointer (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+  // Set up the frame.
+ __ push(ebp);
+ __ mov(ebp, Operand(esp));
+
+ // Push marker in two places.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
+ __ push(edi);
+ __ push(esi);
+ __ push(ebx);
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(not_equal, &not_outermost_js);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ bind(&not_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. Notice that we
+ // cannot store a reference to the trampoline code directly in this
+ // stub, because the builtin stubs may not have been generated yet.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(edx, Immediate(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(edx, Immediate(entry));
+ }
+ __ mov(edx, Operand(edx, 0)); // deref address
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ call(Operand(edx));
+
+ // Unlink this frame from the handler chain.
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ // Pop next_sp.
+ __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current EBP value is the same as the js_entry_sp value, it means
+  // that the current function is the outermost.
+ __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+ __ j(not_equal, &not_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the left hand is a JS object.
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);
+
+ // Get the prototype of the function.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // edx is function, eax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+
+ // Register mapping:
+ // eax is object map.
+ // edx is function.
+ // ebx is function prototype.
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+
+ __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
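+  // For example, if the object map's prototype chain is p0 -> p1 -> ... ->
+  // null, the loop below reports an instance as soon as some pi equals the
+  // function prototype in ebx, and a non-instance upon reaching null.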
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(ecx, Operand(ebx));
+ __ j(equal, &is_instance);
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ j(equal, &is_not_instance);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ Set(eax, Immediate(0));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs, the never-NaN-NaN condition is only taken into account if the
+  // condition is equal.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s",
+ cc_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object_, Immediate(kSmiTagMask));
+ __ j(zero, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(index_, Immediate(kSmiTagMask));
+ __ j(not_zero, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ test(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ test(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzx_w(result_, FieldOperand(object_,
+ scratch_, times_1, // Scratch is smi-tagged.
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ SmiUntag(scratch_);
+ __ movzx_b(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, eax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(scratch_, Immediate(kSmiTagMask));
+ __ j(not_zero, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code for getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ test(code_,
+ Immediate(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ j(not_zero, &slow_case_, not_taken);
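+  // For example, with kMaxAsciiCharCode == 127 the immediate above is
+  // kSmiTagMask | ~0xFF, so the jump is taken unless code_ is a smi whose
+  // untagged value fits in the range 0..127.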
+
+ __ Set(result_, Immediate(Factory::single_character_string_cache()));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ // At this point code register contains smi tagged ascii char code.
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, Factory::undefined_value());
+ __ j(equal, &slow_case_, not_taken);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
+ // Load the two arguments.
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (flags_ == NO_STRING_ADD_FLAGS) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
+ }
+
+ // Both arguments are strings.
+ // eax: first string
+ // edx: second string
+ // Check if either of the strings is empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in eax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in edx.
+ __ mov(eax, edx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // eax: first string
+ // ebx: length of first string as a smi
+ // ecx: length of second string as a smi
+ // edx: second string
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ __ bind(&both_not_zero_length);
+ __ add(ebx, Operand(ecx));
+ STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
+ // Handle exceptionally long strings in the runtime system.
+ __ j(overflow, &string_add_runtime);
+ // Handle the addition of two one-character strings specially: the combined
+ // two-character string is looked up in the symbol table first.
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+ &string_add_runtime);
+
+ // Get the two characters forming the new string.
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string, make_two_character_string_no_reload;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, ebx, ecx, eax, edx, edi,
+ &make_two_character_string_no_reload, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Allocate a two character string.
+ __ bind(&make_two_character_string);
+ // Reload the arguments.
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+ // Get the two characters forming the new string.
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ bind(&make_two_character_string_no_reload);
+ __ IncrementCounter(&Counters::string_add_make_two_char, 1);
+ __ AllocateAsciiString(eax, // Result.
+ 2, // Length.
+ edi, // Scratch 1.
+ edx, // Scratch 2.
+ &string_add_runtime);
+ // Pack both characters in ebx.
+ __ shl(ecx, kBitsPerByte);
+ __ or_(ebx, Operand(ecx));
+ // Set the characters in the new string.
+ __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
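+ // Strings shorter than kMinNonFlatLength are cheaper to copy into a flat
+ // result than to represent as a cons string that must be flattened later.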
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ j(below, &string_add_flat_result);
+
+ // If the result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ascii, the result is an ascii cons string.
+ Label non_ascii, allocated, ascii_data;
+ __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ and_(ecx, Operand(edi));
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ __ bind(&ascii_data);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
+ __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
+ __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ mov(eax, ecx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // ecx: first instance type AND second instance type.
+ // edi: second instance type.
+ __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, Operand(ecx));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
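+ // The xor/and/cmp sequence above checks that the two instance types differ
+ // in both the encoding bit and the ascii-data hint bit: one operand is an
+ // ascii string and the other is a two-byte string hinted to contain only
+ // ascii data, so an ascii result is still safe.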
+ // Allocate a two byte cons string.
+ __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // eax: first string
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&string_add_flat_result);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // eax: first string
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ Label non_ascii_string_add_flat_result;
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(zero, &string_add_runtime);
+
+ // Both strings are ascii strings. As they are short they are both flat.
+ // ebx: length of resulting flat string as a smi
+ __ SmiUntag(ebx);
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // eax: first string - known to be two byte
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ SmiUntag(ebx);
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow) {
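+ // Conversion strategy (a sketch of the checks below): strings pass through
+ // untouched; numbers go through the number-to-string cache; safe String
+ // wrappers are unwrapped to their underlying string; anything else jumps
+ // to the slow label so the builtin can handle it.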
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ test(arg, Immediate(kSmiTagMask));
+ __ j(zero, &not_string);
+ __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
+ __ j(below, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ mov(Operand(esp, stack_offset), arg);
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ test(arg, Immediate(kSmiTagMask));
+ __ j(zero, slow);
+ __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
+ __ j(not_equal, slow);
+ __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ j(zero, slow);
+ __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
+ __ mov(Operand(esp, stack_offset), arg);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
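+ // In C terms this loop is simply (sketch):
+ //   do { *dest++ = *src++; } while (--count != 0);
+ // with byte-sized or word-sized elements depending on the ascii flag.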
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ } else {
+ __ mov_w(scratch, Operand(src, 0));
+ __ mov_w(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
+ }
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
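+
+ // Strategy (sketch): copy count / 4 doublewords with a single rep movs,
+ // then copy the remaining 0-3 bytes with a short byte loop.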
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are fewer than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes);
+
+ // Copy from esi to edi using the rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ cld();
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_probed,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ mov(scratch, c1);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ mov(scratch, c2);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_probed);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, kBitsPerByte);
+ __ or_(chars, Operand(c2));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+ __ mov(symbol_table,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ SmiUntag(mask);
+ __ sub(Operand(mask), Immediate(1));
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // symbol_table: symbol table
+ // mask: capacity mask
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
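+ // Each probe adds SymbolTable::GetProbeOffset(i) to the hash and masks
+ // with the capacity, mirroring the runtime HashTable lookup (assuming
+ // GetProbeOffset(i) == i * (i + 1) / 2, i.e. quadratic probing).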
+ Label found_in_symbol_table;
+ Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ mov(scratch, hash);
+ if (i > 0) {
+ __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(mask));
+
+ // Load the entry from the symbol table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ mov(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, Factory::undefined_value());
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ cmp(FieldOperand(candidate, String::kLengthOffset),
+ Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &next_probe[i]);
+
+ // As we are out of registers save the mask on the stack and use that
+ // register as a temporary.
+ __ push(mask);
+ Register temp = mask;
+
+ // Check that the candidate is a non-external ascii string.
+ __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe_pop_mask[i]);
+
+ // Check if the two characters match.
+ __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ and_(temp, 0x0000ffff);
+ __ cmp(chars, Operand(temp));
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe_pop_mask[i]);
+ __ pop(mask);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // When control reaches found_in_symbol_table the candidate, held in the
+ // scratch register, is the result.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ __ pop(mask); // Pop saved mask from the stack.
+ if (!result.is(eax)) {
+ __ mov(eax, result);
+ }
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ mov(hash, character);
+ __ shl(hash, 10);
+ __ add(hash, Operand(character));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ add(hash, Operand(character));
+ // hash += hash << 10;
+ __ mov(scratch, hash);
+ __ shl(scratch, 10);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ mov(scratch, hash);
+ __ shl(scratch, 3);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 11;
+ __ mov(scratch, hash);
+ __ sar(scratch, 11);
+ __ xor_(hash, Operand(scratch));
+ // hash += hash << 15;
+ __ mov(scratch, hash);
+ __ shl(scratch, 15);
+ __ add(hash, Operand(scratch));
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ test(hash, Operand(hash));
+ __ j(not_zero, &hash_not_zero);
+ __ mov(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
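+
+
+// For reference, the three helpers above compute the following hash
+// (a sketch in C, for hash values in the smi range):
+//   uint32_t hash = 0;
+//   for (each character c) { hash += c; hash += hash << 10; hash ^= hash >> 6; }
+//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;
+//   if (hash == 0) hash = 27;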
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+
+ // Calculate length of sub string using the smi values.
+ Label result_longer_than_two;
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ sub(ecx, Operand(edx));
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
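+ // Given 0 <= from <= to <= length, to - from == length implies that the
+ // requested substring is the whole string.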
+ Label return_eax;
+ __ j(equal, &return_eax);
+ // Special handling of substrings of length 1 and 2. One-character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two-character strings are looked up in the symbol table.
+ __ SmiUntag(ecx); // Result length is no longer smi.
+ __ cmp(ecx, 2);
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+
+ // Sub string of length 2 requested.
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (value is 2)
+ // edx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiUntag(edx); // From index is no longer smi.
+ __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx,
+ FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, ebx, ecx, eax, edx, edi,
+ &make_two_character_string, &make_two_character_string);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Set up registers for allocating the two character string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ Set(ecx, Immediate(2));
+
+ __ bind(&result_longer_than_two);
+ // eax: string
+ // ebx: instance type
+ // ecx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ SmiUntag(ebx);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&non_ascii_flat);
+ // eax: string
+ // ebx: instance type & (kStringRepresentationMask | kStringEncodingMask)
+ // ecx: result string length
+ // Check for flat two byte string
+ __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ // As from is a smi, it is 2 times the value, which matches the size of a
+ // two byte character.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+ __ mov(esi, edx); // Restore esi.
+
+ __ bind(&return_eax);
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+
+ // Find minimum length.
+ Label left_shorter;
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change index to run from -min_length to -1 by adding min_length
+ // to string start. This means that loop ends when index reaches zero,
+ // which doesn't need an additional compare.
+ __ SmiUntag(min_length);
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
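+ // Equivalent loop in C (sketch), with left and right pointing just past
+ // the compared regions:
+ //   for (int i = -min_length; i != 0; i++)
+ //     if (left[i] != right[i]) goto result_not_equal;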
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
+ }
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ __ test(length_delta, Operand(length_delta));
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&result_not_equal);
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+
+ // Compare flat ascii strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(ecx);
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
new file mode 100644
index 0000000000..351636faf7
--- /dev/null
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -0,0 +1,376 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_CODE_STUBS_IA32_H_
+#define V8_IA32_CODE_STUBS_IA32_H_
+
+#include "macro-assembler.h"
+#include "code-stubs.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm);
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags,
+ TypeInfo operands_type)
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false),
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ if (static_operands_type_.IsSmi()) {
+ mode_ = NO_OVERWRITE;
+ }
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ use_sse3_(SSE3Bits::decode(key)),
+ static_operands_type_(TypeInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(runtime_operands_type),
+ name_(NULL) {
+ }
+
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ bool ArgsInRegistersSupported() {
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
+ }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers not on the stack.
+ bool args_reversed_; // Left and right argument are swapped.
+ bool use_sse3_;
+
+ // Number type information of operands, determined by code generator.
+ TypeInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_),
+ static_operands_type_.ToString());
+ }
+#endif
+
+ // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+ class ArgsReversedBits: public BitField<bool, 11, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 18 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | SSE3Bits::encode(use_sse3_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_)
+ | StaticTypeInfoBits::encode(
+ static_operands_type_.ThreeBitRepresentation())
+ | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
+
+ bool ShouldGenerateSmiCode() {
+ return HasSmiCodeInStub() &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be edi.
+ Register src, // Must be esi.
+ Register count, // Must be ecx.
+ Register scratch, // Neither of above.
+ bool ascii);
+
+ // Probe the symbol table for a two character string. If the string
+ // requires non-standard hashing, a jump to the label not_probed is
+ // performed and registers c1 and c2 are preserved. In all other
+ // cases they are clobbered. If the string is not found by probing, a
+ // jump to the label not_found is performed. This jump does not
+ // guarantee that the string is not in the symbol table. If the
+ // string is found, the code falls through with the string in
+ // register eax.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_probed,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return flags_; }
+
+ void Generate(MacroAssembler* masm);
+
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow);
+
+ const StringAddFlags flags_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ explicit StringCompareStub() {
+ }
+
+ // Compares two flat ascii strings and returns the result in eax after
+ // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the object register is found in the cache, the generated code falls
+ // through with the result in the result register. The object and result
+ // registers can be the same. If the number is not found in the cache, the
+ // code jumps to the label not_found, and only the content of the object
+ // register is guaranteed to be unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index cc89cc7db5..854052a65b 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -29,8 +29,9 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "bootstrapper.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
@@ -934,97 +935,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
}
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in register number. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* operand_conversion_failure);
-
- // Test if operands are smis or heap numbers and load them
- // into xmm0 and xmm1 if they are. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-};
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
// Perform or call the specialized stub for a binary operation. Requires the
// three registers left, right and dst to be distinct and spilled. This
// deferred operation has up to three entry points: The main one calls the
@@ -1501,12 +1411,12 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
answer = frame_->CallStub(&stub, 2);
} else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+ StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
}
} else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+ StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
}
answer.set_type_info(TypeInfo::String());
frame_->Push(&answer);
@@ -1541,7 +1451,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_SMI_CODE_IN_STUB,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
} else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
false, overwrite_mode);
@@ -1564,7 +1474,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_GENERIC_BINARY_FLAGS,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
}
}
@@ -1573,6 +1483,20 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
}
+Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right) {
+ if (stub->ArgsInRegistersSupported()) {
+ stub->SetArgsInRegisters();
+ return frame_->CallStub(stub, left, right);
+ } else {
+ frame_->Push(left);
+ frame_->Push(right);
+ return frame_->CallStub(stub, 2);
+ }
+}
+
+
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
Object* answer_object = Heap::undefined_value();
switch (op) {
@@ -2772,41 +2696,6 @@ void CodeGenerator::Comparison(AstNode* node,
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ cmp(operand.reg(), Factory::null_value());
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ cmp(operand.reg(), Factory::undefined_value());
- dest->true_target()->Branch(equal);
- __ test(operand.reg(), Immediate(kSmiTagMask));
- dest->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
} else if (left_side_constant_1_char_string ||
right_side_constant_1_char_string) {
if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
@@ -3423,8 +3312,10 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(zero, &build_args);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &build_args);
+ __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
+ __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
+ __ cmp(Operand(ecx), Immediate(apply_code));
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -5520,9 +5411,12 @@ void DeferredRegExpLiteral::Generate() {
class DeferredAllocateInNewSpace: public DeferredCode {
public:
- DeferredAllocateInNewSpace(int size, Register target)
- : size_(size), target_(target) {
+ DeferredAllocateInNewSpace(int size,
+ Register target,
+ int registers_to_save = 0)
+ : size_(size), target_(target), registers_to_save_(registers_to_save) {
ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
+ ASSERT_EQ(0, registers_to_save & target.bit());
set_comment("[ DeferredAllocateInNewSpace");
}
void Generate();
@@ -5530,15 +5424,28 @@ class DeferredAllocateInNewSpace: public DeferredCode {
private:
int size_;
Register target_;
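+ // Bit mask of register codes to preserve around the allocation call.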
+ int registers_to_save_;
};
void DeferredAllocateInNewSpace::Generate() {
+ for (int i = 0; i < kNumRegs; i++) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ push(save_register);
+ }
+ }
__ push(Immediate(Smi::FromInt(size_)));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
if (!target_.is(eax)) {
__ mov(target_, eax);
}
+ for (int i = kNumRegs - 1; i >= 0; i--) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ pop(save_register);
+ }
+ }
}
@@ -5712,12 +5619,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->depth() > 1) {
+ if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ clone = frame_->CallStub(&stub, 3);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ } else if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
clone = frame_->CallStub(&stub, 3);
}
frame_->Push(&clone);
@@ -5727,12 +5640,9 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i);
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+ if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
+ continue;
+ }
// The property must be set by generated code.
Load(value);
@@ -5796,12 +5706,9 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5888,12 +5795,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame()->Push(&value);
Load(node->value());
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5991,11 +5895,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -6407,11 +6308,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -6424,8 +6324,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// constructor invocation.
CodeForSourcePosition(node->position());
Result result = frame_->CallConstructor(arg_count);
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
@@ -7359,6 +7258,88 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT_EQ(1, args->length());
+
+ Load(args->at(0));
+ Result object_result = frame_->Pop();
+ object_result.ToRegister(eax);
+ object_result.Unuse();
+ {
+ VirtualFrame::SpilledScope spilled_scope;
+
+ Label done;
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ // Load JSRegExpResult map into edx.
+ // Arguments to this function should be results of calling RegExp exec,
+ // which is either an unmodified JSRegExpResult or null. Anything not having
+ // the unmodified JSRegExpResult map is returned unmodified.
+ // This also ensures that elements are fast.
+ __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ cmp(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ j(not_equal, &done);
+
+ if (FLAG_debug_code) {
+ // Check that object really has empty properties array, as the map
+ // should guarantee.
+ __ cmp(FieldOperand(eax, JSObject::kPropertiesOffset),
+ Immediate(Factory::empty_fixed_array()));
+ __ Check(equal, "JSRegExpResult: default map but non-empty properties.");
+ }
+
+ DeferredAllocateInNewSpace* allocate_fallback =
+ new DeferredAllocateInNewSpace(JSRegExpResult::kSize,
+ ebx,
+ edx.bit() | eax.bit());
+
+ // All set, copy the contents to a new object.
+ __ AllocateInNewSpace(JSRegExpResult::kSize,
+ ebx,
+ ecx,
+ no_reg,
+ allocate_fallback->entry_label(),
+ TAG_OBJECT);
+ __ bind(allocate_fallback->exit_label());
+
+ // Copy all fields from eax to ebx.
+ STATIC_ASSERT(JSRegExpResult::kSize % (2 * kPointerSize) == 0);
+ // There is an even number of fields, so unroll the loop once
+ // for efficiency.
+ for (int i = 0; i < JSRegExpResult::kSize; i += 2 * kPointerSize) {
+ STATIC_ASSERT(JSObject::kMapOffset % (2 * kPointerSize) == 0);
+ if (i != JSObject::kMapOffset) {
+ // The map was already loaded into edx.
+ __ mov(edx, FieldOperand(eax, i));
+ }
+ __ mov(ecx, FieldOperand(eax, i + kPointerSize));
+
+ STATIC_ASSERT(JSObject::kElementsOffset % (2 * kPointerSize) == 0);
+ if (i == JSObject::kElementsOffset) {
+ // If the elements array isn't empty, make it copy-on-write
+ // before copying it.
+ Label empty;
+ __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ __ j(equal, &empty);
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::fixed_cow_array_map()));
+ __ bind(&empty);
+ }
+ __ mov(FieldOperand(ebx, i), edx);
+ __ mov(FieldOperand(ebx, i + kPointerSize), ecx);
+ }
+ __ mov(eax, ebx);
+
+ __ bind(&done);
+ }
+ frame_->Push(eax);
+}
+
+
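Aside: the fast path above clones the JSRegExpResult two pointer-sized words
per iteration and, rather than deep-copying a non-empty elements array, retags
that array's map as copy-on-write so both results can share it until one is
written to. A minimal C++ sketch of the same strategy; every type and constant
here is an illustrative stand-in, not the real heap layout:

    #include <cstring>

    struct FakeMap {};
    FakeMap fixed_cow_array_map;            // assumed COW marker map
    struct FakeElements { FakeMap* map; };  // backing store header only
    FakeElements empty_fixed_array;         // assumed empty-elements sentinel

    struct FakeRegExpResult {               // fixed-size result object
      FakeMap* map;
      void* properties;                     // always the empty array here
      FakeElements* elements;
      void* index;
      void* input;
    };

    void CloneRegExpResult(FakeRegExpResult* dst, const FakeRegExpResult* src) {
      // Mark a non-empty elements array copy-on-write before sharing it, so
      // a later store through either clone forces a real copy first.
      if (src->elements != &empty_fixed_array) {
        src->elements->map = &fixed_cow_array_map;
      }
      // The stub unrolls this into two-word moves; a flat copy of the
      // fixed-size object expresses the same thing.
      std::memcpy(dst, src, sizeof(FakeRegExpResult));
    }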
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
@@ -7601,7 +7582,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
KeyedLoadIC::kSlowCaseBitFieldMask);
deferred->Branch(not_zero);
- // Check the object's elements are in fast case.
+  // Check that the object's elements are in fast case and writable.
__ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
@@ -7951,6 +7932,42 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(value.reg());
+ }
+
+ __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result string = frame_->Pop();
+ string.ToRegister();
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(string.reg());
+ }
+
+ Result number = allocator()->Allocate();
+ ASSERT(number.is_valid());
+ __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
+ __ IndexFromHash(number.reg(), number.reg());
+ string.Unuse();
+ frame_->Push(&number);
+}
+
+
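Aside: both inlined runtime functions above work off the string's hash field,
which can cache the string's value as an array index. The bit layout below is
hypothetical; only the shape of the test (branch on the zero flag after a
masked 'test') and of the extraction (a shift and mask, as in IndexFromHash)
mirrors the generated code:

    #include <cstdint>

    // Hypothetical layout; the real masks and shifts are defined on String.
    constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 1;  // assumed
    constexpr uint32_t kArrayIndexShift = 2;                     // assumed
    constexpr uint32_t kArrayIndexMask  = 0x0FFFFFFCu;           // assumed

    // %_HasCachedArrayIndex: the index is cached iff the masked bits are zero.
    bool HasCachedArrayIndex(uint32_t hash_field) {
      return (hash_field & kContainsCachedArrayIndexMask) == 0;
    }

    // %_GetCachedArrayIndex: extract the cached index from the hash field.
    uint32_t GetCachedArrayIndex(uint32_t hash_field) {
      return (hash_field & kArrayIndexMask) >> kArrayIndexShift;
    }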
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
ASSERT(!in_safe_int32_mode());
if (CheckForInlineRuntimeCall(node)) {
@@ -8114,9 +8131,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->Push(&value);
} else {
Load(node->expression());
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
@@ -8821,11 +8836,9 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ if (node->left()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ } else if (node->right()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_RIGHT;
}
@@ -9057,6 +9070,41 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+ ASSERT(!in_safe_int32_mode());
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ cmp(operand.reg(), Factory::null_value());
+ if (node->is_strict()) {
+ operand.Unuse();
+ destination()->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ destination()->true_target()->Branch(equal);
+ __ cmp(operand.reg(), Factory::undefined_value());
+ destination()->true_target()->Branch(equal);
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ temp.Unuse();
+ operand.Unuse();
+ destination()->Split(not_zero);
+ }
+}
+
+
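Aside: VisitCompareToNull encodes JavaScript's equality rules for null. A
hedged C++ restatement of the branch structure, with 'Value' as an
illustrative stand-in for a tagged operand (the undetectable-object case
models document.all-style host objects):

    struct Value {
      bool is_null;
      bool is_undefined;
      bool is_smi;
      bool is_undetectable_object;
    };

    bool CompareToNull(const Value& v, bool is_strict) {
      if (is_strict) return v.is_null;               // x === null: only null
      if (v.is_null || v.is_undefined) return true;  // x == null: undefined too
      if (v.is_smi) return false;                    // a number is never == null
      return v.is_undetectable_object;               // undetectables compare equal
    }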
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
@@ -9496,15 +9544,10 @@ Result CodeGenerator::EmitKeyedLoad() {
if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
}
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
+ // Get the elements array from the receiver.
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ Assert(equal, "JSObject with fast elements map has slow elements");
- }
+ __ AssertFastElements(elements.reg());
// Check that the key is within bounds.
__ cmp(key.reg(),
@@ -9787,4406 +9830,6 @@ void Reference::SetValue(InitState init_state) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ mov(ebx, Immediate(Factory::empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(Factory::the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Setup the object header.
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Setup the fixed slots.
- __ xor_(ebx, Operand(ebx)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, Factory::undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
- __ cmp(ecx, Factory::undefined_value());
- __ j(equal, &slow_case);
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // 'null' => false.
- __ cmp(eax, Factory::null_value());
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &true_result);
- __ fldz();
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in eax.
- __ bind(&true_result);
- __ mov(eax, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ mov(eax, 0);
- __ ret(1 * kPointerSize);
-}
-
-
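Aside: the ToBooleanStub removed here (this changeset moves the stubs into
code-stubs-ia32.cc) implements the out-of-line part of ToBoolean. A C++ sketch
of its decision ladder, assuming as the stub does that smis, booleans and
undefined were already handled inline; 'HeapValue' is illustrative:

    #include <cmath>
    #include <cstddef>

    struct HeapValue {
      bool is_null, is_undetectable, is_js_object, is_string, is_heap_number;
      std::size_t string_length;
      double number_value;
    };

    bool ToBooleanSlow(const HeapValue& v) {
      if (v.is_null) return false;                   // 'null' => false
      if (v.is_undetectable) return false;           // undetectable => false
      if (v.is_js_object) return true;               // JS object => true
      if (v.is_string) return v.string_length != 0;  // string => false iff empty
      if (v.is_heap_number)                          // +0, -0, NaN => false
        return v.number_value != 0.0 && !std::isnan(v.number_value);
      return true;                                   // anything else => true
    }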
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ mov(right_arg, right);
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ mov(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ mov(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ mov(right_arg, right);
- __ mov(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(Immediate(right));
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (left.is(left_arg)) {
- __ mov(right_arg, Immediate(right));
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ mov(left_arg, Immediate(right));
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ mov(left_arg, left);
- __ mov(right_arg, Immediate(right));
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(Immediate(left));
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (right.is(right_arg)) {
- __ mov(left_arg, Immediate(left));
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ mov(right_arg, Immediate(left));
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ mov(right_arg, right);
- __ mov(left_arg, Immediate(left));
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
-
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- __ or_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_AND) {
- __ and_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_XOR) {
- __ xor_(right, Operand(left));
- GenerateReturn(masm);
- return;
- }
- }
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
-      // - 0x40000000: this number would convert to negative when
-      //   Smi tagging. These two cases can only happen with shifts
-      //   by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
-    // -1. We cannot use the overflow flag, since it is not set by the
-    // idiv instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- break;
- }
-
- default:
- break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
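Aside: GenerateSmiCode leans on three bit tricks worth spelling out: or-ing
the operands smi-checks both at once, a signed 32-bit result fits in a
(31-bit, tag 0) smi exactly when bits 31 and 30 agree, and an unsigned SHR
result fits only when both of those bits are clear. A C++ restatement:

    #include <cstdint>

    constexpr intptr_t kSmiTagMaskBits = 1;  // low tag bit; smis carry tag 0

    // The or has a set tag bit iff at least one operand is not a smi, so a
    // single masked test covers both operands.
    bool BothSmis(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMaskBits) == 0;
    }

    // The stub's 'cmp eax, 0xc0000000' plus sign-flag branch amounts to a
    // range check against the 31-bit smi range.
    bool FitsSmi(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);
    }

    // For SHR the result is unsigned: 0x80000000 would lose its top bit when
    // tagging and 0x40000000 would turn negative, so both bits must be clear.
    bool UnsignedFitsSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }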
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
-
- // Generate fast case smi code if requested. This flag is set when the fast
- // case smi code is not generated by the caller. Generating it here will speed
- // up common operations.
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
-
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(edx);
- __ AbortIfNotSmi(eax);
- }
- FloatingPointHelper::LoadSSE2Smis(masm, ecx);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm);
- }
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- }
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // Try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm,
- static_operands_type_,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
-  // result. If arguments were passed in registers, now place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Test for string arguments before calling runtime.
- Label not_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
-    // are already in edx and eax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- if (HasArgsReversed()) {
- lhs = eax;
- rhs = edx;
- } else {
- lhs = edx;
- rhs = eax;
- }
-
- // Test if first argument is a string.
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &not_string1);
-
- // First argument is a string, test second.
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &string1);
-
- // First and second argument are strings. Jump to the string add stub.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, edi, ebx, ecx, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
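Aside: GenerateHeapResultAllocation, restated: under an overwrite mode the
corresponding operand can be reused as the result object if it is already a
heap number, while a smi operand (or NO_OVERWRITE) forces a fresh allocation.
A hedged sketch with illustrative types (trailing underscores avoid clashing
with the real OverwriteMode names):

    enum Mode { NO_OVERWRITE_, OVERWRITE_LEFT_, OVERWRITE_RIGHT_ };

    struct Num { bool is_smi; /* payload elided */ };

    // Returns the object the result will be stored into; may allocate.
    Num* ResultHolder(Mode mode, Num* left, Num* right,
                      Num* (*allocate_heap_number)()) {
      Num* candidate = nullptr;
      if (mode == OVERWRITE_LEFT_)  candidate = left;
      if (mode == OVERWRITE_RIGHT_) candidate = right;
      // A smi is an immediate, not a heap object, so it cannot be reused.
      if (candidate != nullptr && !candidate->is_smi) return candidate;
      return allocate_heap_number();  // stub jumps to alloc_failure on failure
    }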
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- ASSERT(!HasArgsInRegisters());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
- __ push(edx);
- __ push(eax);
- }
- __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- __ pop(ecx); // Save return address.
-
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // esp[4]: argument (should be number).
- // esp[0]: return address.
- // Test that eax is a number.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- // ST[0] == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- __ mov(eax,
- Immediate(ExternalReference::transcendental_cache_array_address()));
- // Eax points to cache array.
- __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
-  // Check that the layout of cache elements matches expectations.
- { TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- __ fstp(0);
- __ ret(kPointerSize);
-
- __ bind(&cache_miss);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- GenerateOperation(masm);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-}
-
-
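Aside: the cache lookup above hashes the raw 64-bit double exactly as the
inline comment describes (h = low ^ high; h ^= h >> 16; h ^= h >> 8; mask).
A near-literal C++ version; only kCacheSize is assumed, and the signed type
keeps the shifts arithmetic as in the stub:

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kCacheSize = 512;  // assumed; must be a power of two

    uint32_t TranscendentalHash(double d) {
      int32_t halves[2];                        // [0] = low word, [1] = high word
      std::memcpy(halves, &d, sizeof(halves));  // assumes little-endian layout
      int32_t h = halves[0] ^ halves[1];        // h = low ^ high
      h ^= h >> 16;                             // arithmetic shifts, matching
      h ^= h >> 8;                              // the stub's 'sar'
      return static_cast<uint32_t>(h) & (kCacheSize - 1);
    }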
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
- // Only free register is edi.
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
-
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
-  // If the argument is outside the range -2^63..2^63, fsin/fcos don't
-  // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(Operand(edi), Immediate(supported_exponent_limit));
- __ j(below, &in_range, taken);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(Operand(edi), Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, taken);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(Operand(eax), Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(Operand(eax), Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
-}
-
-
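Aside: GenerateOperation is, in effect, argument range reduction with a
NaN/infinity escape hatch: fsin/fcos only accept |x| < 2^63 and must yield
NaN for NaN and infinities, so oversized finite inputs are first reduced
modulo 2*pi (the fprem1 loop). A scalar C++ model of the same contract:

    #include <cmath>

    constexpr double kTwoPi = 6.283185307179586;

    double SinWithRangeReduction(double x) {
      if (!std::isfinite(x)) return std::nan("");  // +/-Inf and NaN => NaN
      if (std::fabs(x) >= 0x1p63) {                // outside fsin's range
        x = std::fmod(x, kTwoPi);                  // sin has period 2*pi, so
      }                                            // any 2*pi reduction works
      return std::sin(x);
    }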
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, Operand(ecx));
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We just orred in the implicit bit, which took care of one, and
-    // we want to use the full unsigned range, so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
- __ j(positive, &done);
- __ neg(ecx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
-
-
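Aside: IntegerConvert's non-SSE3 path can be restated in portable C++:
unpack sign, exponent and mantissa, return zero for exponents below zero,
bail out (here: return false) when the exponent exceeds 31, and shift the
mantissa into place. A hedged sketch:

    #include <cstdint>
    #include <cstring>

    // Returns false where the stub would jump to conversion_failure.
    bool DoubleBitsToInt32(double d, int32_t* out) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) { *out = 0; return true; }  // |d| < 1 rounds to zero
      if (exponent > 31) return false;              // also catches Inf and NaN
      // Reinsert the implicit 1 above the 52 stored mantissa bits.
      uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
      int64_t value = (bits >> 63) ? -static_cast<int64_t>(magnitude)
                                   : static_cast<int64_t>(magnitude);
      *out = static_cast<int32_t>(value);  // exponent 31 can wrap to INT_MIN,
      return true;                         // matching the >>> case above
    }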
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm,
- edx,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
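Aside: LoadUnknownsAsIntegers, restated: a smi operand is untagged directly,
a heap number goes through IntegerConvert, undefined converts to zero
(ECMA-262, section 9.5), and anything else is left to the runtime. A hedged
sketch reusing DoubleBitsToInt32 from the previous aside; 'TaggedValue' is
illustrative:

    #include <cstdint>

    struct TaggedValue {
      bool is_smi, is_heap_number, is_undefined;
      int32_t smi_value;
      double number_value;
    };

    bool DoubleBitsToInt32(double d, int32_t* out);  // see the sketch above

    // Returns false where the stub jumps to conversion_failure.
    bool LoadUnknownAsInteger(const TaggedValue& v, int32_t* out) {
      if (v.is_smi) { *out = v.smi_value; return true; }  // just untag
      if (v.is_heap_number) return DoubleBitsToInt32(v.number_value, out);
      if (v.is_undefined) { *out = 0; return true; }      // ECMA-262, 9.5
      return false;                                       // runtime fallback
    }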
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- if (type_info.IsNumber()) {
- LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
- } else {
- LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
- }
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(equal, &load_float_eax);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done);
- __ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
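-
-
-// Illustrative sketch (not part of the original file) of the ia32 smi
-// representation the loaders above rely on: with kSmiTag == 0 and
-// kSmiTagSize == 1, a smi stores its 31-bit payload shifted left by one.
-// Assumes two's complement and arithmetic right shifts.
-static inline int32_t SmiTagSketch(int32_t value) { return value << 1; }
-static inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }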
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Jump to non_float if either operand is neither a smi nor a heap number;
- // fall through when both operands are numbers. Clobbers scratch.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken); // argument in edx is OK
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in eax is OK
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
-
- if (negative_zero_ == kStrictNegativeZero) {
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
- }
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- Label undo;
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(no_overflow, &done, taken);
-
- // Restore eax and go slow case.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
- __ jmp(&slow);
-
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_ == UNARY_OVERWRITE) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
- } else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow, not_taken);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- CpuFeatures::IsSupported(SSE3),
- &slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, not_taken);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ jmp(&done);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (overwrite_ == UNARY_NO_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite eax until
- // we're sure we can do it without going through the slow case
- // that needs the value in eax.
- __ AllocateHeapNumber(ebx, edx, edi, &slow);
- __ mov(eax, Operand(ebx));
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- } else {
- UNIMPLEMENTED();
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
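-
-
-// Hedged sketch of the smi-range check in the BIT_NOT path above: with
-// 31-bit smis a value fits iff it lies in [-2^30, 2^30). The stub tests this
-// by computing value - 0xc0000000 (i.e. value + 2^30 mod 2^32) and branching
-// on the sign bit. Illustrative only, not part of the original file.
-static inline bool FitsInSmiSketch(int32_t value) {
-  return value >= -(1 << 30) && value < (1 << 30);
-}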
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
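-
-
-// Equivalent pseudo-C for the fast path above (illustrative, not part of the
-// file). The key and count stay smi-tagged, so the times_2 scale turns each
-// into a byte offset of untagged_value * kPointerSize; with kDisplacement ==
-// kPointerSize the load is, in word terms:
-//
-//   if (!IsSmi(key) || (unsigned)key >= (unsigned)count) goto slow;
-//   result = ((Object**)fp)[count - key + 1];  // untagged count and key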
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
- __ j(zero, &add_arguments_object);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, Operand(ecx));
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
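-
-
-// The try_allocate size computation above, restated as C++ (hedged sketch;
-// the smi-tagged length scaled by times_2 yields argc * kPointerSize):
-static inline int ArgumentsObjectSizeSketch(int argc) {
-  int elements_size =
-      (argc == 0) ? 0 : FixedArray::kHeaderSize + argc * kPointerSize;
-  return elements_size + Heap::kArgumentsObjectSize;
-}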
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Jump straight to the runtime system if native RegExp support was not
- // selected at compile time, or if the regexp entry in generated code has
- // been turned off by the runtime flag.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate the number of capture registers: (number_of_captures + 1) * 2.
- // This uses the assumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
- __ j(above, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ cmp(eax, Operand(ebx));
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
- __ j(greater, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ and_(ebx,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Factory::empty_string());
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // eax: first part of cons string.
- // ebx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ascii)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(edi, Immediate(1)); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(edi, Immediate(0)); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains the hole.
- __ CmpObjectType(edx, CODE_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // eax: subject string
- // edx: code
- // edi: encoding of subject string (1 if ascii, 0 if two_byte);
- // Load the used arguments before starting to push arguments for the call to
- // the native RegExp code, to avoid handling a changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // edi: encoding of subject string (1 if ascii 0 if two_byte);
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
-
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 5 * kPointerSize), ecx);
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector()));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ test(edi, Operand(edi));
- __ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &setup_two_byte);
- __ SmiUntag(edi);
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest);
-
- __ bind(&setup_two_byte);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1); // edi is a smi (value already times two).
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
-
- __ bind(&setup_rest);
-
- // Argument 2: Previous index.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
- // Argument 1: Subject string.
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(edx, kRegExpExecuteArguments);
-
- // Check the result.
- Label success;
- __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If it is not an exception, it can only be retry; handle that in the
- // runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code, but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerun the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ cmp(eax, Operand::StaticVariable(pending_exception));
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(Operand(eax), Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- Label next_capture, done;
- // The capture register counter starts at the number of capture registers
- // and counts down until wrapping after zero.
- __ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
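-
-
-// The capture-register arithmetic used twice above, in plain C++
-// (illustrative): the required count is (number_of_captures + 1) * 2, and
-// since a smi is twice its untagged value, adding 2 to the smi-tagged
-// capture count computes it directly.
-static inline int CaptureRegistersSketch(int number_of_captures) {
-  return (number_of_captures + 1) * 2;
-}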
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers: the result register doubles as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- Label not_smi, hash_calculated;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
-}
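-
-
-// The cache index computation above, sketched in C++ (mirrors the scheme the
-// comments attribute to Heap::GetNumberStringCache; illustrative only). For
-// smis the hash is the untagged value; for heap numbers it is the xor of the
-// low and high words of the double.
-static inline int NumberStringCacheIndexSketch(uint32_t hash, int length) {
-  // Each entry occupies two slots (number, string), hence length / 2 entries.
-  uint32_t mask = static_cast<uint32_t>(length >> 1) - 1;
-  return static_cast<int>(hash & mask);
-}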
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- {
- Label not_identical;
- __ cmp(eax, Operand(edx));
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, &check_for_nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second-best thing: test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- } else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(eax, Operand(eax));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, Operand(edx));
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
- }
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
- Label slow; // Fallthrough label.
- Label not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
-
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, not_taken);
- __ j(above, &above_label, not_taken);
-
- __ xor_(eax, Operand(eax));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
- BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects;
- Label return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
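-
-
-// The QNaN test above as a C++ predicate (hedged sketch, assuming IEEE-754
-// doubles and kQuietNaNHighBitsMask == 0xfff << 19, i.e. the 11 exponent
-// bits plus the quiet-NaN mantissa bit in the high word of the double):
-static inline bool IsQuietNaNSketch(uint32_t high_word) {
-  const uint32_t kMask = 0xfffu << 19;
-  return (high_word & kMask) == kMask;
-}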
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
-}
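-
-
-// The symbol test above, restated in C++ (illustrative): an object passes
-// iff its instance type marks it as a string with the symbol bit set.
-static inline bool IsSymbolTypeSketch(uint32_t instance_type) {
-  return (instance_type & (kIsSymbolMask | kIsNotStringMask)) ==
-         (kSymbolTag | kStringTag);
-}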
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(eax);
- __ push(Immediate(Smi::FromInt(0)));
- __ push(eax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &receiver_is_value, not_taken);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow, not_taken);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // eax holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // Remove state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of
- // a JS entry frame.
- __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
- Label skip;
- __ cmp(ebp, 0);
- __ j(equal, &skip, not_taken);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
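-
-
-// Stack handler layout assumed above (four words, per the
-// StackHandlerConstants asserts): [0] next handler, [1] saved frame pointer,
-// [2] state, [3] return pc. The pops above restore the next-handler pointer
-// and ebp, discard the state, and leave ret to consume the pc.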
-
-
- // If true, a Handle<T> is passed by value and returned using the
- // location_ field directly. If false, it is passed and returned as a
- // pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
-#else
-static const bool kPassHandlesDirectly = false;
-#endif
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
- STATIC_ASSERT(kArgc == 4);
- if (kPassHandlesDirectly) {
- // When handles are passed directly we don't have to allocate extra
- // space for, and pass, an out parameter.
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
- } else {
- // The function expects three arguments to be passed but we allocate
- // four to get space for the output cell. The argument slots are filled
- // as follows:
- //
- // 3: output cell
- // 2: arguments pointer
- // 1: name
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects
- // so the out cell will have to be popped explicitly after returning
- // from the function.
- __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
- __ mov(ebx, esp);
- __ add(Operand(ebx), Immediate(3 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
- }
- // Call the api function!
- __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
- __ cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
- __ j(not_equal, &promote_scheduled_exception, not_taken);
- if (!kPassHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location.
- __ mov(eax, Operand(eax, 0));
- }
- // Check if the result handle holds 0.
- __ test(eax, Operand(eax));
- __ j(zero, &empty_handle, not_taken);
- // It was non-zero. Dereference to get the result value.
- __ mov(eax, Operand(eax, 0));
- __ bind(&prologue);
- __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
- __ ret(0);
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- __ bind(&empty_handle);
- // It was zero; the result is undefined.
- __ mov(eax, Factory::undefined_value());
- __ jmp(&prologue);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // eax: result parameter for PerformGC, if any
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result_size_ is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
- // Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ call(Operand(ebx));
- // Result is in eax or edx:eax - do not destroy these registers!
-
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
- if (FLAG_debug_code) {
- Label okay;
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &okay);
- __ int3();
- __ bind(&okay);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned, not_taken);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, taken);
-
- // Special handling of out of memory exceptions.
- __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions, which are uncatchable
- // by JavaScript code.
- __ cmp(eax, Factory::termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
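-
-
-// Failure detection above, in predicate form (hedged sketch): failure
-// objects carry kFailureTag in their low two bits, so adding one clears
-// both bits.
-static inline bool IsFailureSketch(int32_t result) {
-  return ((result + 1) & kFailureTagMask) == 0;
-}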
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ mov(esp, Operand(esp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to the next handler past the current
- // ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(eax, false);
- __ mov(Operand::StaticVariable(external_caught), eax);
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ mov(Operand::StaticVariable(pending_exception), eax);
- }
-
- // Clear the context pointer.
- __ xor_(esi, Operand(esi));
-
- // Restore fp from handler and discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // State.
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_);
-
- // eax: result parameter for PerformGC, if any (setup below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(ebp);
- __ mov(ebp, Operand(esp));
-
- // Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ push(Operand::StaticVariable(c_entry_fp));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. Notice that we
- // cannot store a reference to the trampoline code directly in this
- // stub, because the builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
-
- // Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- // Pop next_sp.
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the left hand side is a JS object.
- __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
- // Get the prototype of the function.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
- // edx is function, eax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
-
- // Check that the function prototype is a JS object.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
-
- // Register mapping:
- // eax is object map.
- // edx is function.
- // ebx is function prototype.
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
-
- __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ bind(&loop);
- __ cmp(ecx, Operand(ebx));
- __ j(equal, &is_instance);
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
- __ j(equal, &is_not_instance);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ Set(eax, Immediate(0));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
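
// The loop above is the assembly form of the usual instanceof walk: follow
// map->prototype links from the object until either the function prototype
// or null is hit. A hedged C++ sketch of just that walk (types hypothetical):

struct ProtoNodeSketch {
  const ProtoNodeSketch* prototype;  // stands in for map()->prototype()
};

static bool IsInstanceSketch(const ProtoNodeSketch* obj_proto,
                             const ProtoNodeSketch* fn_proto) {
  for (const ProtoNodeSketch* p = obj_proto; p != nullptr; p = p->prototype) {
    if (p == fn_proto) return true;  // is_instance
  }
  return false;  // chain ended at null: is_not_instance
}

// Note the stub's return convention is inverted relative to a boolean:
// eax == 0 encodes "is an instance", Smi(1) encodes "is not".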
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16-bit value. To avoid duplicate
- // stubs, the never-NaN-NaN condition is only taken into account if the
- // condition is equal.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
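
// The same three-parameter packing in plain C++, for illustration (bit
// positions here are made up; V8 derives them from the BitField classes):

#include <cstdint>

static uint16_t EncodeCompareMinorKeySketch(uint16_t cc, bool strict,
                                            bool never_nan_nan,
                                            bool include_number_compare) {
  // Twelve bits of condition code, then one bit per flag.
  return static_cast<uint16_t>(
      (cc << 3) |
      (static_cast<uint16_t>(strict) << 2) |
      (static_cast<uint16_t>(never_nan_nan) << 1) |
      static_cast<uint16_t>(include_number_compare));
}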
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ test(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ test(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzx_w(result_, FieldOperand(object_,
- scratch_, times_1, // Scratch is smi-tagged.
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiUntag(scratch_);
- __ movzx_b(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ SmiTag(result_);
- __ bind(&exit_);
-}
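
// The two load branches above reduce to a simple indexed fetch once the
// string is known to be flat; a C++ sketch of that final step (externals
// and cons strings excluded, as in the fast path):

#include <cstdint>

static uint16_t CharCodeAtFlatSketch(const void* chars, bool is_ascii,
                                     uint32_t index) {
  if (is_ascii) {
    // ascii_string branch: one byte per character.
    return static_cast<const uint8_t*>(chars)[index];
  }
  // Two-byte branch: one uint16 per character.
  return static_cast<const uint16_t*>(chars)[index];
}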
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(scratch_, eax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_, not_taken);
-
- __ Set(result_, Immediate(Factory::single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point the code register contains a smi-tagged ascii char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, Factory::undefined_value());
- __ j(equal, &slow_case_, not_taken);
- __ bind(&exit_);
-}
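
// The single test at the top folds two checks into one mask: "code_ is a
// smi" and "its untagged value is <= String::kMaxAsciiCharCode". A sketch
// of the same predicate (assuming kSmiTag == 0, one tag bit, and an ASCII
// maximum of 127):

#include <cstdint>

static bool IsSmiTaggedAsciiCodeSketch(uint32_t value) {
  const uint32_t kSmiTagMaskSketch = 1;  // smis have the low bit clear
  const uint32_t kMaxAsciiCharCodeSketch = 127;
  // Any bit set under the mask means: not a smi, or out of the ASCII range.
  return (value & (kSmiTagMaskSketch |
                   (~kMaxAsciiCharCodeSketch << 1))) == 0;
}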
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
-
- // First argument is a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings is empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in eax.
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, Operand(ebx));
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, Operand(ecx));
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &string_add_runtime);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
- &string_add_runtime);
-
- // Get the two characters forming the sub string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(ebx, Immediate(Smi::FromInt(2)));
- __ jmp(&make_flat_ascii_string);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
- __ j(below, &string_add_flat_result);
-
- // If the result is not supposed to be flat, allocate a cons string object.
- // If both strings are ascii, the result is an ascii cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, Operand(edi));
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ascii cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ mov(eax, ecx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, Operand(ecx));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &string_add_runtime);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
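
// The stub's control flow, compressed into a decision function (a sketch;
// allocation, copying and the symbol-table probe are elided):

enum AddPathSketch { kReturnFirst, kReturnSecond, kTwoCharLookup,
                     kConsString, kFlatCopy, kRuntime };

static AddPathSketch ChooseAddPathSketch(int first_len, int second_len,
                                         int min_non_flat_length,
                                         int max_length) {
  if (second_len == 0) return kReturnFirst;  // result is the first string
  if (first_len == 0) return kReturnSecond;  // result is the second string
  int result_len = first_len + second_len;
  if (result_len > max_length) return kRuntime;  // the overflow check above
  if (result_len == 2) return kTwoCharLookup;    // symbol table probe
  if (result_len >= min_non_flat_length) return kConsString;
  return kFlatCopy;  // short result: allocate flat and copy characters
}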
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
- }
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
- ASSERT(!scratch.is(dest));
- ASSERT(!scratch.is(src));
- ASSERT(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- __ shl(count, 1);
- }
-
- // Don't enter the rep movs if there are fewer than 4 bytes to copy.
- Label last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes);
-
- // Copy from esi to edi using the rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
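
// The same copy strategy in C++: bulk-move whole doublewords, then mop up
// the remaining count & 3 bytes one at a time (a sketch of the logic, not
// of the rep movs encoding):

#include <cstddef>
#include <cstdint>
#include <cstring>

static void CopyBytesSketch(uint8_t* dest, const uint8_t* src, size_t count) {
  // Bulk phase: count >> 2 doublewords (the rep_movs above).
  for (size_t i = 0; i < (count >> 2); i++) {
    std::memcpy(dest, src, 4);
    src += 4;
    dest += 4;
  }
  // Tail phase: the 0-3 bytes left over.
  for (size_t i = 0; i < (count & 3); i++) {
    *dest++ = *src++;
  }
}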
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
- __ mov(symbol_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(Operand(mask), Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // symbol_table: symbol table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(mask));
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- __ cmp(candidate, Factory::undefined_value());
- __ j(equal, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ascii string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, Operand(temp));
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
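
// The probe sequence in outline: a fixed number of slots derived from the
// hash are inspected before giving up. A simplified C++ sketch over a
// power-of-two open-addressed table (entry layout hypothetical):

#include <cstddef>
#include <cstdint>

struct TwoCharEntrySketch {
  bool used;       // stands in for the undefined_value check
  uint16_t chars;  // char 1 in byte 0, char 2 in byte 1
};

static const TwoCharEntrySketch* ProbeTwoCharTableSketch(
    const TwoCharEntrySketch* table, size_t capacity /* power of two */,
    uint32_t hash, uint16_t chars, const uint32_t* probe_offsets) {
  const int kProbesSketch = 4;  // mirrors kProbes above
  size_t mask = capacity - 1;
  for (int i = 0; i < kProbesSketch; i++) {
    size_t index = (hash + probe_offsets[i]) & mask;  // offsets[0] == 0
    const TwoCharEntrySketch* entry = &table[index];
    if (!entry->used) return nullptr;         // undefined: definitely absent
    if (entry->chars == chars) return entry;  // both characters match
  }
  return nullptr;  // not found by probing; may still be in the table
}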
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ mov(hash, character);
- __ shl(hash, 10);
- __ add(hash, Operand(character));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, Operand(character));
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ sar(scratch, 11);
- __ xor_(hash, Operand(scratch));
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, Operand(scratch));
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ test(hash, Operand(hash));
- __ j(not_zero, &hash_not_zero);
- __ mov(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
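
// Chaining the three helpers gives the classic one-at-a-time string hash.
// In C++, following the per-step comments above literally:

#include <cstddef>
#include <cstdint>

static uint32_t StringHashSketch(const uint8_t* chars, size_t length) {
  uint32_t hash = 0;
  for (size_t i = 0; i < length; i++) {  // init + add-character steps
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;  // get-hash finalization
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;  // zero is reserved
  return hash;
}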
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ sub(ecx, Operand(edx));
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked up in the symbol table.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // eax: string
- // ebx: instance type
- // ecx: sub string length (value is 2)
- // edx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiUntag(edx); // From index is no longer smi.
- __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx,
- FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
-
- __ bind(&result_longer_than_two);
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- __ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- // As from is a smi it is already multiplied by 2, which matches the size
- // of a two-byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(3 * kPointerSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Label result_not_equal;
- Label result_greater;
- Label compare_lengths;
-
- __ IncrementCounter(&Counters::string_compare_native, 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- __ test(min_length, Operand(min_length));
- __ j(zero, &compare_lengths);
-
- // Change index to run from -min_length to -1 by adding min_length
- // to string start. This means that loop ends when index reaches zero,
- // which doesn't need an additional compare.
- __ SmiUntag(min_length);
- __ lea(left,
- FieldOperand(left,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ lea(right,
- FieldOperand(right,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ neg(min_length);
-
- Register index = min_length; // index = -min_length;
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ mov_b(scratch2, Operand(left, index, times_1, 0));
- __ cmpb(scratch2, Operand(right, index, times_1, 0));
- __ j(not_equal, &result_not_equal);
- __ add(Operand(index), Immediate(1));
- __ j(not_zero, &loop);
- }
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&result_not_equal);
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
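
// The negative-index trick above, restated in C++: bias both pointers one
// past their first min_length characters and count an index up from
// -min_length, so hitting zero ends the loop without a separate bounds
// check. Returns LESS/EQUAL/GREATER as -1/0/1 (a sketch):

#include <cstddef>

static int CompareFlatAsciiSketch(const char* left, size_t left_len,
                                  const char* right, size_t right_len) {
  size_t min_length = left_len < right_len ? left_len : right_len;
  const char* left_end = left + min_length;
  const char* right_end = right + min_length;
  for (ptrdiff_t index = -static_cast<ptrdiff_t>(min_length); index != 0;
       index++) {
    if (left_end[index] != right_end[index]) {
      return left_end[index] < right_end[index] ? -1 : 1;  // result_not_equal
    }
  }
  // Equal up to min_length: the length difference decides.
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;
}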
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat ascii strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
#undef __
#define __ masm.
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 37b70110cc..adc00058df 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -574,6 +574,11 @@ class CodeGenerator: public AstVisitor {
void Int32BinaryOperation(BinaryOperation* node);
+ // Generate a stub call from the virtual frame.
+ Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right);
+
void Comparison(AstNode* node,
Condition cc,
bool strict,
@@ -627,9 +632,6 @@ class CodeGenerator: public AstVisitor {
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
- static bool PatchInlineRuntimeEntry(Handle<String> name,
- const InlineRuntimeLUT& new_entry,
- InlineRuntimeLUT* old_entry);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -699,8 +701,14 @@ class CodeGenerator: public AstVisitor {
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
+ // Construct a RegExp exec result with two in-object properties.
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+ // Clone the result of a regexp function.
+ // Must be an object created by GenerateRegExpConstructResult with
+ // no extra properties.
+ void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
@@ -724,6 +732,9 @@ class CodeGenerator: public AstVisitor {
// Check whether two RegExps are equivalent
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -797,327 +808,6 @@ class CodeGenerator: public AstVisitor {
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type)
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) {
- if (static_operands_type_.IsSmi()) {
- mode_ = NO_OVERWRITE;
- }
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- Result GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
- bool use_sse3_;
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
- }
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-};
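
// The BitField classes above compose the 18-bit minor key. A generic
// sketch of that encode/decode pattern (V8's real template also validates
// that the value fits its field):

#include <cstdint>

template <typename T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// E.g. the overwrite mode above: two bits at position 0.
typedef BitFieldSketch<int, 0, 2> ModeBitsSketch;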
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing, a jump to the label not_found is performed. This
- // jump does not guarantee that the string is not in the symbol table. If
- // the string is found, the code falls through with the string in eax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {
- }
-
- // Compare two flat ascii strings and return the result in eax after popping
- // two arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache, the generated code falls
- // through with the result in the result register. The object and the result
- // register can be the same. If the number is not found in the cache, the
- // code jumps to the label not_found with the content of register object
- // unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index b57cf3d07d..ee9456564c 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -94,22 +94,33 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs,
+ RegList object_regs,
+ RegList non_object_regs,
bool convert_call_to_jmp) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
// Enter an internal frame.
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- __ PushRegistersFromMemory(pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, "Unable to encode value as smi");
+ }
+ __ SmiTag(reg);
+ __ push(reg);
+ }
+ }
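+  // Sketch of why 0xc0000000 is the right guard: smi-tagging shifts left
+  // by one (kSmiTag == 0), so a raw value survives a tag/untag round trip
+  // exactly when its top two bits are clear.
+  //
+  //   static bool CanEncodeAsSmi(uint32_t v) { return (v & 0xc0000000) == 0; }
+  //   static uint32_t SmiTag(uint32_t v)     { return v << 1; }
+  //   static uint32_t SmiUntag(uint32_t v)   { return v >> 1; }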
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -117,12 +128,25 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(eax, Immediate(0)); // no arguments
__ mov(ebx, Immediate(ExternalReference::debug_break()));
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
- // stack in the reverse order as they where pushed.
- __ PopRegistersToMemory(pointer_regs);
+ // stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, Immediate(kDebugZapValue));
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
+ }
+ }
// Get rid of the internal frame.
__ LeaveInternalFrame();
@@ -130,12 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ pop(eax);
+ __ add(Operand(esp), Immediate(kPointerSize));
}
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -151,7 +172,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -- eax : receiver
// -- ecx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
}
@@ -162,7 +183,8 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -- ecx : name
// -- edx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}
@@ -172,7 +194,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// -- edx : receiver
// -- eax : key
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
}
@@ -183,19 +205,17 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- ecx : key
// -- edx : receiver
// -----------------------------------
- // Register eax contains an object that needs to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
  // Register state for keyed IC call (from ic-ia32.cc)
// ----------- S t a t e -------------
- // -- eax: number of arguments
+ // -- ecx: name
// -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
}
@@ -204,10 +224,11 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
  // eax is the actual number of arguments, not encoded as a smi; see the
  // comment above the IC call.
// ----------- S t a t e -------------
- // -- eax: number of arguments
+ // -- eax: number of arguments (not smi)
+ // -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
}
@@ -216,7 +237,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: return value
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
}
@@ -225,7 +246,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, 0, 0, false);
}
@@ -245,7 +266,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
  // In the places where a debug break slot is inserted, no registers can
  // contain object pointers.
- Generate_DebugBreakCallHelper(masm, 0, true);
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 212cfdeaa0..9baf76336b 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -35,21 +35,6 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute the stack pointer.
@@ -58,58 +43,11 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index cb36904ee3..1631b04327 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -216,12 +217,28 @@ void FullCodeGenerator::EmitReturnSequence() {
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
+ // We never generate inlined constant smi operations for these.
+ return kNoConstants;
+ } else if (right->IsSmiLiteral()) {
+ return kRightConstant;
+ } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
+ return kLeftConstant;
+ } else {
+ return kNoConstants;
+ }
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -246,20 +263,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -289,20 +293,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
Move(result_register(), slot);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(result_register(), slot);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -330,20 +321,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
__ mov(result_register(), lit->handle());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ mov(result_register(), lit->handle());
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -371,20 +349,7 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
__ pop(result_register());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- __ mov(result_register(), Operand(esp, 0));
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -420,56 +385,7 @@ void FullCodeGenerator::DropAndApply(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ mov(result_register(), reg);
- __ mov(Operand(esp, 0), result_register());
- break;
- }
- DoTest(context);
- break;
- }
-}
-
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -510,32 +426,6 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::true_value());
- break;
- case kStack:
- __ push(Immediate(Factory::true_value()));
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- __ push(Immediate(Factory::false_value()));
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -563,78 +453,19 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
break;
}
case Expression::kTest:
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) __ push(Immediate(Factory::false_value()));
- break;
- }
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ mov(result_register(), Factory::true_value());
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) __ push(Immediate(Factory::true_value()));
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
- __ jmp(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is in the accumulator. If the value might be needed
- // on the stack (value/test and test/value contexts with a stack location
- // desired), then the value is already duplicated on the stack.
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
- // In value/test and test/value expression contexts with stack as the
- // desired location, there is already an extra value on the stack. Use a
- // label to discard it if unneeded.
- Label discard;
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_false = &discard;
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_true = &discard;
- break;
- }
- break;
- }
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Emit the inlined tests assumed by the stub.
__ cmp(result_register(), Factory::undefined_value());
__ j(equal, if_false);
@@ -648,83 +479,28 @@ void FullCodeGenerator::DoTest(Expression::Context context) {
__ test(result_register(), Immediate(kSmiTagMask));
__ j(zero, if_true);
- // Save a copy of the value if it may be needed and isn't already saved.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- }
-
// Call the ToBoolean stub for all other cases.
ToBooleanStub stub;
__ push(result_register());
__ CallStub(&stub);
__ test(eax, Operand(eax));
- // The stub returns nonzero for true. Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
-
- case Expression::kTest:
- __ j(not_zero, true_label_);
- __ jmp(false_label_);
- break;
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ j(zero, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ j(not_zero, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ j(not_zero, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ j(zero, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
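Split() threads jumps around the fall-through block: if the false target falls through, only the conditional jump is emitted; if the true target falls through, the condition is negated; only when neither falls through does an unconditional jmp follow. A standalone sketch of the same decision, with hypothetical Label/Cond types in place of the assembler's:

    #include <cstdio>

    enum Cond { kZero, kNotZero };
    static Cond Negate(Cond cc) { return cc == kZero ? kNotZero : kZero; }
    static const char* Name(Cond cc) { return cc == kZero ? "jz" : "jnz"; }

    struct Label { const char* name; };

    // Emit at most one conditional jump, plus an unconditional jmp only
    // when neither target is the fall-through block.
    static void Split(Cond cc, Label* if_true, Label* if_false,
                      Label* fall_through) {
      if (if_false == fall_through) {
        printf("%s %s\n", Name(cc), if_true->name);
      } else if (if_true == fall_through) {
        printf("%s %s\n", Name(Negate(cc)), if_false->name);
      } else {
        printf("%s %s\n", Name(cc), if_true->name);
        printf("jmp %s\n", if_false->name);
      }
    }

    int main() {
      Label t = {"if_true"}, f = {"if_false"};
      Split(kNotZero, &t, &f, &f);  // false case falls through: one jnz
      return 0;
    }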
@@ -908,20 +684,21 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
+ // Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
- __ mov(ecx, edx);
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(equal, true);
__ CallStub(&stub);
__ test(eax, Operand(eax));
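The inlined case above leans on the tag layout twice: or-ing the two words and testing kSmiTagMask proves both are smis in one instruction, and two tagged smis compare equal exactly when their payloads do. The same fast path in plain C++ (a sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    const uint32_t kSmiTagMask = 1;

    // Strict-equality fast path for two possibly-tagged words: if both
    // are smis (tag bit clear), a raw word compare decides '==='.
    // Otherwise the caller falls back to the CompareStub equivalent.
    static bool TrySmiStrictEquals(uint32_t a, uint32_t b, bool* result) {
      if (((a | b) & kSmiTagMask) != 0) return false;  // slow case
      *result = (a == b);
      return true;
    }

    int main() {
      bool eq = false;
      assert(TrySmiStrictEquals(3u << 1, 3u << 1, &eq) && eq);
      assert(TrySmiStrictEquals(3u << 1, 4u << 1, &eq) && !eq);
      return 0;
    }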
@@ -1203,7 +980,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, Factory::undefined_value());
__ j(not_equal, &materialized);
@@ -1326,12 +1103,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_elements()));
- if (expr->depth() > 1) {
+ if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ } else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
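The array-literal path now picks one of four strategies: share a copy-on-write backing store, call the runtime for nested literals, call the runtime for flat-but-long literals, or clone inline with the stub. A sketch of that dispatch, with invented names standing in for the stubs and runtime entries (the real length limit lives in code-stubs.h):

    // Hypothetical dispatch mirroring the cloning choice above; the enum
    // values and the length limit are illustrative stand-ins.
    enum CloneStrategy {
      kCopyOnWriteElements,   // share the fixed_cow_array backing store
      kCreateArrayLiteral,    // nested literal: full runtime call
      kCreateShallowRuntime,  // flat but too long for the inline stub
      kCloneElements          // short and flat: inline shallow-clone stub
    };

    const int kMaximumClonedLength = 8;  // illustrative value

    static CloneStrategy Choose(bool elements_are_cow, int depth, int length) {
      if (elements_are_cow) return kCopyOnWriteElements;
      if (depth > 1) return kCreateArrayLiteral;
      if (length > kMaximumClonedLength) return kCreateShallowRuntime;
      return kCloneElements;
    }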
@@ -1385,10 +1168,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1399,57 +1183,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(eax); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1490,14 +1287,325 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- __ push(result_register());
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Unknown());
+void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
+ __ add(Operand(eax), Immediate(value));
+ __ j(overflow, &call_stub);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ // Undo the optimistic add operation and call the shared stub.
+ __ bind(&call_stub);
+ __ sub(Operand(eax), Immediate(value));
+ Token::Value op = Token::ADD;
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (left_is_constant_smi) {
+ __ push(Immediate(value));
+ __ push(eax);
+ } else {
+ __ push(eax);
+ __ push(Immediate(value));
+ }
__ CallStub(&stub);
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
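EmitConstantSmiAdd adds first and repairs afterwards: the add runs on the raw tagged word, and only when it overflows or the result fails the smi test is the immediate subtracted back out and the generic stub called. The same optimistic shape in standalone C++ (assuming GCC/Clang's __builtin_add_overflow; the function name is illustrative):

    #include <cstdint>

    // Optimistic tagged add: smis are payload << 1, so adding two tagged
    // words adds the payloads. If the add overflows, or the other operand
    // turns out to be a heap pointer (tag bit set in the sum), report
    // failure so the caller can undo and call the generic stub.
    static bool TrySmiAdd(int32_t tagged_left, int32_t tagged_right,
                          int32_t* tagged_sum) {
      int32_t sum;
      if (__builtin_add_overflow(tagged_left, tagged_right, &sum)) {
        return false;  // overflow: needs a heap number
      }
      if ((sum & 1) != 0) return false;  // one operand was not a smi
      *tagged_sum = sum;
      return true;
    }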
+void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
+ if (left_is_constant_smi) {
+ __ mov(ecx, eax);
+ __ mov(eax, Immediate(value));
+ __ sub(Operand(eax), ecx);
+ } else {
+ __ sub(Operand(eax), Immediate(value));
+ }
+ __ j(overflow, &call_stub);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ __ bind(&call_stub);
+ if (left_is_constant_smi) {
+ __ push(Immediate(value));
+ __ push(ecx);
+ } else {
+ // Undo the optimistic sub operation.
+ __ add(Operand(eax), Immediate(value));
+
+ __ push(eax);
+ __ push(Immediate(value));
+ }
+
+ Token::Value op = Token::SUB;
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ __ CallStub(&stub);
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value) {
+ Label call_stub, smi_case, done;
+ int shift_value = value->value() & 0x1f;
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ __ bind(&call_stub);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ __ push(eax);
+ __ push(Immediate(value));
+ __ CallStub(&stub);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::SHL:
+ if (shift_value != 0) {
+ __ mov(edx, eax);
+ if (shift_value > 1) {
+ __ shl(edx, shift_value - 1);
+ }
+ // Convert int result to smi, checking that it is in smi range.
+ ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ __ add(edx, Operand(edx));
+ __ j(overflow, &call_stub);
+ __ mov(eax, edx); // Put result back into eax.
+ }
+ break;
+ case Token::SAR:
+ if (shift_value != 0) {
+ __ sar(eax, shift_value);
+ __ and_(eax, ~kSmiTagMask);
+ }
+ break;
+ case Token::SHR:
+ if (shift_value < 2) {
+ __ mov(edx, eax);
+ __ SmiUntag(edx);
+ __ shr(edx, shift_value);
+ __ test(edx, Immediate(0xc0000000));
+ __ j(not_zero, &call_stub);
+ __ SmiTag(edx);
+ __ mov(eax, edx); // Put result back into eax.
+ } else {
+ __ SmiUntag(eax);
+ __ shr(eax, shift_value);
+ __ SmiTag(eax);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
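The three shifts need different amounts of checking: SAR of a smi can never leave smi range, SHL must re-validate via the overflow flag of the tagging add, and SHR only needs the 0xc0000000 test for shift counts below 2, since larger shifts always clear the top two bits. A sketch of the SHR rule (plain C++, not V8 code):

    #include <cstdint>

    // Logical right shift of a tagged smi by a constant. For shifts >= 2
    // the top two bits of the untagged result are always clear, so only
    // small shift counts need the explicit range test before re-tagging.
    static bool TrySmiShr(int32_t tagged, int shift, int32_t* tagged_result) {
      uint32_t untagged = static_cast<uint32_t>(tagged >> 1);  // SmiUntag
      uint32_t shifted = untagged >> (shift & 0x1f);
      if (shift < 2 && (shifted & 0xc0000000u) != 0) {
        return false;  // does not fit in a smi; caller uses the stub
      }
      *tagged_result = static_cast<int32_t>(shifted << 1);  // SmiTag
      return true;
    }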
+void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value) {
+ Label smi_case, done;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ // The order of the arguments does not matter for bit-ops with a
+ // constant operand.
+ __ push(Immediate(value));
+ __ push(eax);
+ __ CallStub(&stub);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::BIT_OR:
+ __ or_(Operand(eax), Immediate(value));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(Operand(eax), Immediate(value));
+ break;
+ case Token::BIT_AND:
+ __ and_(Operand(eax), Immediate(value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ EmitConstantSmiBitOp(expr, op, context, mode, value);
+ break;
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ ASSERT(!left_is_constant_smi);
+ EmitConstantSmiShiftOp(expr, op, context, mode, value);
+ break;
+ case Token::ADD:
+ EmitConstantSmiAdd(expr, context, mode, left_is_constant_smi, value);
+ break;
+ case Token::SUB:
+ EmitConstantSmiSub(expr, context, mode, left_is_constant_smi, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ if (constant == kRightConstant) {
+ Smi* value = Smi::cast(*right->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, context, mode, false, value);
+ return;
+ } else if (constant == kLeftConstant) {
+ Smi* value = Smi::cast(*left->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, context, mode, true, value);
+ return;
+ }
+
+ // Do combined smi check of the operands. Left operand is on the
+ // stack. Right operand is in eax.
+ Label done, stub_call, smi_case;
+ __ pop(edx);
+ __ mov(ecx, eax);
+ __ or_(eax, Operand(edx));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ __ bind(&stub_call);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (stub.ArgsInRegistersSupported()) {
+ stub.GenerateCall(masm_, edx, ecx);
+ } else {
+ __ push(edx);
+ __ push(ecx);
+ __ CallStub(&stub);
+ }
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ __ mov(eax, edx); // Copy left operand in case of a stub call.
+
+ switch (op) {
+ case Token::SAR:
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ sar_cl(eax);  // No check of the result is necessary.
+ __ SmiTag(eax);
+ break;
+ case Token::SHL: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shl_cl(eax);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(positive, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::SHR: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shr_cl(eax);
+ __ test(eax, Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::ADD:
+ __ add(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ break;
+ case Token::SUB:
+ __ sub(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(eax);
+ __ imul(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &done, taken);
+ __ mov(ebx, edx);
+ __ or_(ebx, Operand(ecx));
+ __ j(negative, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ or_(eax, Operand(ecx));
+ break;
+ case Token::BIT_AND:
+ __ and_(eax, Operand(ecx));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(eax, Operand(ecx));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
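Most of the smi cases above are a single ALU op, but MUL has a hazard the generated checks guard against: a zero product must re-check the operand signs, because smi 0 cannot represent JavaScript's -0. A sketch of that check (standalone C++; the widening overflow test stands in for the jump on the overflow flag):

    #include <cstdint>

    // Tagged smi multiply: untag one side, multiply, then treat a zero
    // result as suspect -- if either operand was negative the true result
    // is -0, which only a heap number can represent, so bail to the stub.
    static bool TrySmiMul(int32_t tagged_a, int32_t tagged_b,
                          int32_t* tagged_result) {
      int64_t product = static_cast<int64_t>(tagged_a >> 1) * tagged_b;
      if (product != static_cast<int32_t>(product)) return false;  // overflow
      if (product == 0 && (tagged_a | tagged_b) < 0) return false; // -0 case
      *tagged_result = static_cast<int32_t>(product);
      return true;
    }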
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode) {
+ TypeInfo type = TypeInfo::Unknown();
+ GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
+ if (stub.ArgsInRegistersSupported()) {
+ __ pop(edx);
+ stub.GenerateCall(masm_, edx, eax);
+ } else {
+ __ push(result_register());
+ __ CallStub(&stub);
+ }
Apply(context, eax);
}
@@ -1914,11 +2022,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
- VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // Push the constructor on the stack. If it's not a function, it's used
+ // as the receiver for CALL_NON_FUNCTION; otherwise, the value on the
+ // stack is ignored.
+ VisitForValue(expr->expression(), kStack);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -1931,16 +2039,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into edi and eax.
+ // Load function and argument count into edi and eax.
__ Set(eax, Immediate(arg_count));
- // Function is in esp[arg_count + 1].
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in eax, or pop it.
- DropAndApply(1, context_, eax);
+ Apply(context_, eax);
}
@@ -1952,11 +2057,12 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_true);
- __ jmp(if_false);
+ Split(zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1970,11 +2076,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
- __ j(zero, if_true);
- __ jmp(if_false);
+ Split(zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1988,7 +2095,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
@@ -2003,8 +2112,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, if_false);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, if_true);
- __ jmp(if_false);
+ Split(below_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2018,13 +2126,14 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(above_equal, if_true);
- __ jmp(if_false);
+ Split(above_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2038,15 +2147,16 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
- __ jmp(if_false);
+ Split(not_zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2061,7 +2171,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -2079,13 +2191,14 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2099,13 +2212,14 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2119,13 +2233,14 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2138,7 +2253,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2154,8 +2271,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2171,12 +2287,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(ebx);
__ cmp(eax, Operand(ebx));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2731,6 +2848,46 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(eax);
+ }
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ Split(zero, if_true, if_false, fall_through);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(eax);
+ }
+
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
+
+ Apply(context_, eax);
+}
+
+
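Both helpers read String::kHashFieldOffset, where a string that looks like an array index caches its numeric value next to the hash. Extraction is one mask and one shift, and because the shift is reduced by kSmiTagSize the value comes out already smi-tagged. A sketch with illustrative field constants (the real widths are defined in objects.h):

    #include <cassert>
    #include <cstdint>

    // Illustrative layout: low bits hold flags, a contiguous range holds
    // the cached array index. The constants are stand-ins; the point is
    // that (hash & mask) >> (kHashShift - kSmiTagSize) is a tagged smi.
    const int kHashShift = 2;
    const int kSmiTagSize = 1;
    const uint32_t kArrayIndexValueMask = 0x3ffffffcu;  // low bits zero

    static uint32_t IndexFromHash(uint32_t hash_field) {
      return (hash_field & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
    }

    int main() {
      uint32_t hash = 7u << kHashShift;                  // cached index 7
      assert(IndexFromHash(hash) == 7u << kSmiTagSize);  // smi-tagged 7
      return 0;
    }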
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2831,19 +2988,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::undefined_value());
- break;
- case kStack:
- __ push(Immediate(Factory::undefined_value()));
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2852,45 +2997,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
-
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ mov(eax, CodeGenerator::GlobalObject());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ call(ic, RelocInfo::CODE_TARGET);
- __ push(eax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, eax);
break;
@@ -2911,9 +3033,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2927,28 +3047,26 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register eax.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register eax.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label call_stub;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &call_stub);
+ // Flip the payload bits while keeping a zero smi tag:
+ // ~(x + 1) is the smi encoding of ~(x >> 1).
+ __ lea(eax, Operand(eax, kSmiTagMask));
+ __ not_(eax);
+ __ jmp(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ jmp(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ not_(result_register());
- __ and_(result_register(), ~kSmiTagMask); // Remove inverted smi-tag.
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, eax);
break;
}
@@ -2960,6 +3078,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
@@ -3008,8 +3128,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ }
__ push(eax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ bind(&no_conversion);
@@ -3024,8 +3146,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -3046,7 +3166,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
@@ -3132,68 +3252,117 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ mov(eax, CodeGenerator::GlobalObject());
+ __ mov(ecx, Immediate(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(eax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
}
}
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmp(obj, Operand(null_const));
- if (strict) {
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ if (check->Equals(Heap::number_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_true);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // Check for undetectable objects => false.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ Split(below, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ cmp(eax, Factory::true_value());
__ j(equal, if_true);
- } else {
+ __ cmp(eax, Factory::false_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ cmp(eax, Factory::undefined_value());
__ j(equal, if_true);
- __ cmp(obj, Factory::undefined_value());
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // Check for undetectable objects => true.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
- __ test(obj, Immediate(kSmiTagMask));
+ // Regular expressions => 'function' (they are callable).
+ __ CmpInstanceType(edx, JS_REGEXP_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
- // It can be an undetectable object.
- __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, edx);
+ __ j(equal, if_false);
+ // Check for undetectable objects => false.
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ // Check for JS objects => true.
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, if_false);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ Split(less_equal, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
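TryLiteralCompare only fires for the exact source shape typeof <expression> == <string literal> (strict or loose; both agree once one side is a known string), which is what lets it classify the string at compile time instead of materializing a typeof result. A sketch of that pattern match with hypothetical AST node types:

    #include <string>

    // Hypothetical AST shapes standing in for V8's Expression classes.
    struct Expr { virtual ~Expr() = default; };
    struct StringLiteral : Expr { std::string value; };
    struct TypeofOp : Expr { Expr* operand; };

    // Returns the comparison string if the tree matches
    // `typeof <operand> == "<literal>"`, so the caller can emit the
    // specialized tag/map checks instead of a generic compare.
    static const std::string* MatchTypeofCompare(Expr* left, Expr* right,
                                                 Expr** operand_out) {
      auto* literal = dynamic_cast<StringLiteral*>(right);
      auto* unary = dynamic_cast<TypeofOp*>(left);
      if (literal == nullptr || unary == nullptr) return nullptr;
      *operand_out = unary->operand;
      return &literal->value;
    }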
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
@@ -3201,7 +3370,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
switch (expr->op()) {
@@ -3209,8 +3390,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ cmp(eax, Factory::true_value());
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
@@ -3218,8 +3398,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
break;
}
@@ -3227,28 +3407,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
- case Token::EQ: {
+ case Token::EQ:
cc = equal;
__ pop(edx);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, edx, eax, if_true, if_false, ecx);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, eax, edx, if_true, if_false, ecx);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = less;
__ pop(edx);
@@ -3275,23 +3441,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
+ Split(cc, if_true, if_false, fall_through);
}
}
@@ -3301,6 +3465,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ cmp(eax, Factory::null_value());
+ if (expr->is_strict()) {
+ Split(equal, if_true, if_false, fall_through);
+ } else {
+ __ j(equal, if_true);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(equal, if_true);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // It can be an undetectable object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(edx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
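VisitCompareToNull encodes the two flavours of a null comparison: strict accepts only null itself, while loose also accepts undefined and any undetectable heap object (smis are rejected outright). The same truth table in plain C++, with an invented value model:

    // Invented value classification; only the branches matter here.
    struct Value {
      bool is_null;
      bool is_undefined;
      bool is_heap_object;   // false for smis
      bool is_undetectable;  // e.g. document.all-style host objects
    };

    // Mirrors the code above: `x === null` vs `x == null`.
    static bool CompareToNull(const Value& v, bool is_strict) {
      if (v.is_null) return true;
      if (is_strict) return false;
      if (v.is_undefined) return true;
      return v.is_heap_object && v.is_undetectable;
    }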
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, eax);
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 2cd41a15bb..3d0bd796a0 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -452,6 +452,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, the elements map check is skipped.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -468,8 +469,12 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// we fall through.
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
+ } else {
+ __ AssertFastElements(scratch);
+ }
// Check that the key (index) is within bounds.
__ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
__ j(above_equal, out_of_range);
@@ -514,31 +519,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(hash, String::kArrayIndexValueMask);
- __ shr(hash, String::kHashShift - kSmiTagSize);
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ mov(key, hash);
-}
-
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -558,12 +538,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
+ // Check the "has fast elements" bit in the receiver's map which is
+ // now in ecx.
+ __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
+ 1 << Map::kHasFastElements);
+ __ j(zero, &check_pixel_array, not_taken);
+
GenerateFastArrayLoad(masm,
edx,
eax,
ecx,
eax,
- &check_pixel_array,
+ NULL,
&slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
@@ -572,7 +558,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// edx: receiver
// eax: key
- // ecx: elements
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(ebx, eax);
__ SmiUntag(ebx);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
@@ -693,7 +679,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&index_string);
- GenerateIndexFromHash(masm, eax, ebx);
+ __ IndexFromHash(ebx, eax);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -967,7 +953,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// edx: JSObject
// ecx: key (a smi)
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
+ // Check that the object is in fast mode and writable.
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast, taken);
@@ -1023,8 +1009,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode; if it is the
- // length is always a smi.
+ // array. Check that the array is in fast mode (and writable); if it
+ // is, the length is always a smi.
__ bind(&array);
// eax: value
// edx: receiver, a JSArray
@@ -1554,7 +1540,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, ecx, ebx);
+ __ IndexFromHash(ebx, ecx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1872,6 +1858,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ j(not_equal, &miss, not_taken);
// Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
__ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss, not_taken);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 24538461ff..87e25d73db 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -191,81 +191,6 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(Operand::StaticVariable(reg_addr), reg);
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(reg, Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::PushRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Push the content of the memory location to the stack.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- push(Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::PopRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Pop the content from the stack to the memory location.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- pop(Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(scratch, Operand(base, 0));
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(Operand::StaticVariable(reg_addr), scratch);
- lea(base, Operand(base, kPointerSize));
- }
- }
-}
-
void MacroAssembler::DebugBreak() {
Set(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
@@ -274,6 +199,7 @@ void MacroAssembler::DebugBreak() {
}
#endif
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, Operand(dst)); // shorter than mov
@@ -377,6 +303,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ test(object, Immediate(kSmiTagMask));
+ Assert(not_equal, "Operand is not a string");
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Assert(below, "Operand is not a string");
+}
+
+
void MacroAssembler::AbortIfSmi(Register object) {
test(object, Immediate(kSmiTagMask));
Assert(not_equal, "Operand is a smi");
@@ -405,7 +342,8 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
-void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
+
+void MacroAssembler::EnterExitFramePrologue() {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -413,7 +351,7 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
push(ebp);
mov(ebp, Operand(esp));
- // Reserve room for entry stack pointer and push the debug marker.
+ // Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
@@ -425,21 +363,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
mov(Operand::StaticVariable(context_address), esi);
}
-void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // TODO(1243899): This should be symmetric to
- // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
- // correct here, but computed for the other call. Very error
- // prone! FIX THIS. Actually there are deeper problems with
- // register saving than this asymmetry (see the bug report
- // associated with this issue).
- PushRegistersFromMemory(kJSCallerSaved);
- }
-#endif
+void MacroAssembler::EnterExitFrameEpilogue(int argc) {
// Reserve space for arguments.
sub(Operand(esp), Immediate(argc * kPointerSize));
@@ -455,44 +380,30 @@ void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
- EnterExitFramePrologue(mode);
+void MacroAssembler::EnterExitFrame() {
+ EnterExitFramePrologue();
// Setup argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
- EnterExitFrameEpilogue(mode, 2);
+ EnterExitFrameEpilogue(2);
}
-void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+void MacroAssembler::EnterApiExitFrame(int stack_space,
int argc) {
- EnterExitFramePrologue(mode);
+ EnterExitFramePrologue();
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
- EnterExitFrameEpilogue(mode, argc);
+ EnterExitFrameEpilogue(argc);
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // It's okay to clobber register ebx below because we don't need
- // the function pointer after this.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- lea(ebx, Operand(ebp, kOffset));
- CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
- }
-#endif
-
+void MacroAssembler::LeaveExitFrame() {
// Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize));
mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -871,6 +782,31 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
+void MacroAssembler::AllocateAsciiString(Register result,
+ int length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(length > 0);
+
+ // Allocate ascii string in new space.
+ AllocateInNewSpace(SeqAsciiString::SizeFor(length),
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::ascii_string_map()));
+ mov(FieldOperand(result, String::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
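
Reviewer note: the size the new fixed-length allocator requests is SeqAsciiString::SizeFor(length). A rough standalone sketch of that computation; the header size and alignment below are assumptions, not the real constants:

    #include <cstddef>

    const size_t kHeaderSize = 12;       // map + length + hash field (assumed)
    const size_t kObjectAlignment = 4;   // assumed allocation granularity

    // One byte per ascii character, rounded up to the allocation alignment.
    size_t SizeForAsciiString(size_t length) {
      size_t raw = kHeaderSize + length;
      return (raw + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
    }
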
void MacroAssembler::AllocateConsString(Register result,
Register scratch1,
Register scratch2,
@@ -1040,6 +976,25 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+ // reserved for it does not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in the index register.
+ // kArrayIndexValueMask has zeros in the low kHashShift bits.
+ and_(hash, String::kArrayIndexValueMask);
+ STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
+ if (String::kHashShift > kSmiTagSize) {
+ shr(hash, String::kHashShift - kSmiTagSize);
+ }
+ if (!index.is(hash)) {
+ mov(index, hash);
+ }
+}
+
+
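
Reviewer note: the arithmetic in IndexFromHash, lifted into a standalone C++ sketch. The constants are assumptions modeled on the string hash layout the comments describe; the real code also skips the shift entirely when kHashShift == kSmiTagSize:

    #include <cstdint>

    const int kSmiTagSize = 1;                          // assumed
    const int kHashShift = 2;                           // assumed
    const uint32_t kArrayIndexValueMask = 0x3FFFFFFCu;  // assumed field mask

    // and_(hash, mask); shr(hash, kHashShift - kSmiTagSize): the cached array
    // index lands in the result already smi-tagged (low kSmiTagSize bits zero).
    uint32_t SmiTaggedIndexFromHash(uint32_t hash) {
      return (hash & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
    }
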
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
@@ -1298,11 +1253,10 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
- mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
- lea(edx, FieldOperand(edx, Code::kHeaderSize));
ParameterCount expected(ebx);
- InvokeCode(Operand(edx), expected, actual, flag);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag);
}
@@ -1313,7 +1267,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// Get the function and setup the context.
mov(edi, Immediate(Handle<JSFunction>(function)));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1329,33 +1282,26 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinEntry(edx, id);
- InvokeCode(Operand(edx), expected, expected, flag);
+ GetBuiltinFunction(edi, id);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, expected, flag);
}
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(edi));
-
- // Load the builtins object into target register.
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the JavaScript builtin function from the builtins object.
mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ mov(target, FieldOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
- mov(edi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-
- // Load the code entry point from the builtins object.
- mov(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
- if (FLAG_debug_code) {
- // Make sure the code objects in the builtins object and in the
- // builtin function are the same.
- push(target);
- mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
- cmp(target, Operand(esp, 0));
- Assert(equal, "Builtin code object changed");
- pop(target);
- }
- lea(target, FieldOperand(target, Code::kHeaderSize));
+ GetBuiltinFunction(edi, id);
+ // Load the code entry point from the function into the target register.
+ mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
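
Reviewer note: a toy model (assumed layout, not V8's) of the refactoring above. The code entry point now lives directly in the function object at kCodeEntryOffset, so GetBuiltinEntry reduces to GetBuiltinFunction plus one field load:

    struct ToyFunction { void* code_entry; };
    struct ToyBuiltins { ToyFunction functions[8]; };

    ToyFunction* GetBuiltinFunction(ToyBuiltins* b, int id) {
      return &b->functions[id];  // one load per level in the real chain
    }

    void* GetBuiltinEntry(ToyBuiltins* b, int id) {
      return GetBuiltinFunction(b, id)->code_entry;  // one extra load
    }
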
@@ -1378,6 +1324,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ mov(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map) {
+ // Load the initial map. The global functions all have initial maps.
+ mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (FLAG_debug_code) {
+ Label ok, fail;
+ CheckMap(map, Factory::meta_map(), &fail, false);
+ jmp(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
+}
+
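
Reviewer note: a toy pointer-chasing model (assumed layout) of LoadGlobalFunction above, which walks context to global object to global context to function slot, mirroring the three dependent movs:

    struct ToyContext;
    struct ToyGlobalObject { ToyContext* global_context; };

    struct ToyContext {
      ToyGlobalObject* global;  // Context::GLOBAL_INDEX slot
      void* slots[16];          // global-context function slots
    };

    void* LoadGlobalFunction(ToyContext* current, int index) {
      ToyGlobalObject* global = current->global;
      ToyContext* global_context = global->global_context;
      return global_context->slots[index];
    }
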
void MacroAssembler::Ret() {
ret(0);
@@ -1464,6 +1434,21 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (FLAG_debug_code) {
+ Label ok;
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(Factory::fixed_cow_array_map()));
+ j(equal, &ok);
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ }
+}
+
+
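
Reviewer note: a plain C++ restatement of the debug check above (sentinel map pointers are stand-ins). "Fast elements" now means the backing store's map is either the ordinary fixed-array map or the copy-on-write variant this change introduces:

    struct ToyMap {};
    ToyMap fixed_array_map, fixed_cow_array_map;

    bool HasFastElementsMap(const ToyMap* elements_map) {
      return elements_map == &fixed_array_map ||
             elements_map == &fixed_cow_array_map;
    }
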
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L, taken);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 0b16f0d40b..a7534cbd47 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -99,13 +99,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void PushRegistersFromMemory(RegList regs);
- void PopRegistersToMemory(RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -128,18 +121,25 @@ class MacroAssembler: public Assembler {
// Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
- void EnterExitFrame(ExitFrame::Mode mode);
+ void EnterExitFrame();
- void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
+ void EnterApiExitFrame(int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ void LeaveExitFrame();
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global function with the given index.
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same.
+ void LoadGlobalFunctionInitialMap(Register function, Register map);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -169,6 +169,9 @@ class MacroAssembler: public Assembler {
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
@@ -264,6 +267,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
+ // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -350,6 +356,11 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
+ void AllocateAsciiString(Register result,
+ int length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
@@ -393,6 +404,12 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the computed array index on exit. May alias hash.
+ void IndexFromHash(Register hash, Register index);
+
// ---------------------------------------------------------------------------
// Runtime calls
@@ -508,6 +525,8 @@ class MacroAssembler: public Assembler {
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertFastElements(Register elements);
+
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
@@ -559,8 +578,8 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
- void EnterExitFramePrologue(ExitFrame::Mode mode);
- void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+ void EnterExitFramePrologue();
+ void EnterExitFrameEpilogue(int argc);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index a7930fb1ed..2aab7a8d94 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -31,11 +31,9 @@
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
namespace v8 {
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index b2c9dab83b..7fc3f8114d 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -257,16 +257,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
- // Load the global or builtins object from the current context.
- __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ mov(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ mov(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadGlobalFunction(index, prototype);
+ __ LoadGlobalFunctionInitialMap(prototype, prototype);
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -1366,16 +1358,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
+ Label call_builtin;
+
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
- // Check that the elements are in fast mode (not dictionary).
+ // Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &miss);
+ __ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1456,10 +1450,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
}
+ __ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
@@ -1511,10 +1504,10 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
- // Check that the elements are in fast mode (not dictionary).
+ // Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &miss);
+ __ j(not_equal, &call_builtin);
// Get the array's length into ecx and calculate new length.
__ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index ff9132cf71..5f1e1e4e5e 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -1143,9 +1143,9 @@ Result VirtualFrame::CallConstructor(int arg_count) {
// and receiver on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
- PushElementAt(arg_count + 1);
+ PushElementAt(arg_count);
Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
function.ToRegister(edi);
// Constructors are called with the number of arguments in register
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 70bbaf8c96..94dbd5f51f 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -108,10 +108,10 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
}
-Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
+JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) {
Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
ASSERT(map_owner->IsJSObject());
- return JSObject::cast(map_owner)->map();
+ return JSObject::cast(map_owner);
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index a5370a6f85..b4a333ec90 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -165,14 +165,14 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
// The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheMap is not applicable.
+ // IC::GetCodeCacheHolder is not applicable.
return MONOMORPHIC;
} else if (cache_holder == PROTOTYPE_MAP &&
receiver->GetPrototype()->IsNull()) {
- // IC::GetCodeCacheMap is not applicable.
+ // IC::GetCodeCacheHolder is not applicable.
return MONOMORPHIC;
}
- Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
+ Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index a02f272fc7..17450cc356 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -123,8 +123,8 @@ class IC {
JSObject* holder);
static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
JSObject* holder);
- static inline Map* GetCodeCacheMap(Object* object,
- InlineCacheHolderFlag holder);
+ static inline JSObject* GetCodeCacheHolder(Object* object,
+ InlineCacheHolderFlag holder);
protected:
Address fp() const { return fp_; }
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index e7ec6100e5..a39d7c4a97 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -68,15 +68,13 @@ function JSONParse(text, reviver) {
}
var characterQuoteCache = {
+ '\b': '\\b', // ASCII 8, Backspace
+ '\t': '\\t', // ASCII 9, Tab
+ '\n': '\\n', // ASCII 10, Newline
+ '\f': '\\f', // ASCII 12, Formfeed
+ '\r': '\\r', // ASCII 13, Carriage Return
'\"': '\\"',
- '\\': '\\\\',
- '/': '\\/',
- '\b': '\\b',
- '\f': '\\f',
- '\n': '\\n',
- '\r': '\\r',
- '\t': '\\t',
- '\x0B': '\\u000b'
+ '\\': '\\\\'
};
function QuoteSingleJSONCharacter(c) {
@@ -95,7 +93,7 @@ function QuoteSingleJSONCharacter(c) {
}
function QuoteJSONString(str) {
- var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g;
+ var quotable = /[\\\"\x00-\x1f]/g;
return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
}
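
Reviewer note: the net effect of the two json.js changes, sketched in C++ for comparison (this mirrors the new behavior as described, it is not the library code): only backslash, double quote and control characters below 0x20 are escaped, and non-ASCII characters now pass through unchanged.

    #include <cstdio>
    #include <string>

    std::string QuoteJSONString(const std::string& s) {
      std::string out = "\"";
      for (unsigned char c : s) {
        switch (c) {
          case '\b': out += "\\b";  break;
          case '\t': out += "\\t";  break;
          case '\n': out += "\\n";  break;
          case '\f': out += "\\f";  break;
          case '\r': out += "\\r";  break;
          case '"':  out += "\\\""; break;
          case '\\': out += "\\\\"; break;
          default:
            if (c < 0x20) {  // remaining control chars get \u00xx
              char buf[8];
              snprintf(buf, sizeof(buf), "\\u%04x", c);
              out += buf;
            } else {
              out += static_cast<char>(c);
            }
        }
      }
      return out + "\"";
    }
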
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index c9afc62e15..30d4dcbf23 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -380,10 +380,11 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
}
-RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> output) {
+RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
+ Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ Vector<int32_t> output) {
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
ASSERT(index >= 0);
@@ -478,11 +479,9 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
OffsetsVector registers(required_registers);
- IrregexpResult res = IrregexpExecOnce(jsregexp,
- subject,
- previous_index,
- Vector<int>(registers.vector(),
- registers.length()));
+ IrregexpResult res = RegExpImpl::IrregexpExecOnce(
+ jsregexp, subject, previous_index, Vector<int32_t>(registers.vector(),
+ registers.length()));
if (res == RE_SUCCESS) {
int capture_register_count =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
diff --git a/deps/v8/src/jump-target-heavy.h b/deps/v8/src/jump-target-heavy.h
index b2113a5a4c..8cec86926a 100644
--- a/deps/v8/src/jump-target-heavy.h
+++ b/deps/v8/src/jump-target-heavy.h
@@ -117,17 +117,17 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
- virtual void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Hint hint = no_hint);
+ void Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
- virtual void Bind(Result* arg0, Result* arg1);
+ void Bind(Result* arg0, Result* arg1);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 769ac35c85..5a8749e513 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -739,7 +739,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
- array->SetElement(i, *(info_wrapper.GetJSArray()));
+ SetElement(array, i, info_wrapper.GetJSArray());
}
}
@@ -750,7 +750,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
class ReferenceCollectorVisitor : public ObjectVisitor {
public:
explicit ReferenceCollectorVisitor(Code* original)
- : original_(original), rvalues_(10), reloc_infos_(10) {
+ : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) {
}
virtual void VisitPointers(Object** start, Object** end) {
@@ -761,7 +761,13 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
}
}
- void VisitCodeTarget(RelocInfo* rinfo) {
+ virtual void VisitCodeEntry(Address entry) {
+ if (Code::GetObjectFromEntryAddress(entry) == original_) {
+ code_entries_.Add(entry);
+ }
+ }
+
+ virtual void VisitCodeTarget(RelocInfo* rinfo) {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
reloc_infos_.Add(*rinfo);
@@ -778,8 +784,13 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
for (int i = 0; i < rvalues_.length(); i++) {
*(rvalues_[i]) = substitution;
}
+ Address substitution_entry = substitution->instruction_start();
for (int i = 0; i < reloc_infos_.length(); i++) {
- reloc_infos_[i].set_target_address(substitution->instruction_start());
+ reloc_infos_[i].set_target_address(substitution_entry);
+ }
+ for (int i = 0; i < code_entries_.length(); i++) {
+ Address entry = code_entries_[i];
+ Memory::Address_at(entry) = substitution_entry;
}
}
@@ -787,28 +798,10 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
Code* original_;
ZoneList<Object**> rvalues_;
ZoneList<RelocInfo> reloc_infos_;
+ ZoneList<Address> code_entries_;
};
-class FrameCookingThreadVisitor : public ThreadVisitor {
- public:
- void VisitThread(ThreadLocalTop* top) {
- StackFrame::CookFramesForThread(top);
- }
-};
-
-class FrameUncookingThreadVisitor : public ThreadVisitor {
- public:
- void VisitThread(ThreadLocalTop* top) {
- StackFrame::UncookFramesForThread(top);
- }
-};
-
-static void IterateAllThreads(ThreadVisitor* visitor) {
- Top::IterateThread(visitor);
- ThreadManager::IterateArchivedThreads(visitor);
-}
-
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!Heap::InNewSpace(substitution));
@@ -824,13 +817,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// so temporary replace the pointers with offset numbers
// in prologue/epilogue.
{
- FrameCookingThreadVisitor cooking_visitor;
- IterateAllThreads(&cooking_visitor);
-
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
-
- FrameUncookingThreadVisitor uncooking_visitor;
- IterateAllThreads(&uncooking_visitor);
}
// Now iterate over all pointers of all objects, including code_target
@@ -1372,8 +1359,9 @@ static const char* DropActivationsInActiveThread(
for (int i = 0; i < array_len; i++) {
if (result->GetElement(i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- result->SetElement(i, Smi::FromInt(
- LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+ Handle<Object> replaced(
+ Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+ SetElement(result, i, replaced);
}
}
return NULL;
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index e083f01a44..0bca5ebd86 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "global-handles.h"
#include "log.h"
#include "macro-assembler.h"
@@ -1266,7 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
case Code::BINARY_OP_IC:
// fall through
case Code::STUB:
- description = CodeStub::MajorName(code_object->major_key(), true);
+ description =
+ CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
if (description == NULL)
description = "A stub from the snapshot";
tag = Logger::STUB_TAG;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 686a61c367..d261f57da7 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -83,4 +83,31 @@ const int kInvalidProtoDepth = -1;
#error Unsupported target architecture.
#endif
+namespace v8 {
+namespace internal {
+
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment {
+ public:
+ Comment(MacroAssembler* masm, const char* msg);
+ ~Comment();
+
+ private:
+ MacroAssembler* masm_;
+ const char* msg_;
+};
+
+#else
+
+class Comment {
+ public:
+ Comment(MacroAssembler*, const char*) {}
+};
+
+#endif // DEBUG
+
+} } // namespace v8::internal
+
#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 643a2851a1..1ceb620146 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -120,7 +120,7 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 1a020e55d1..a9e852ef32 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
@@ -84,11 +85,15 @@ void MarkCompactCollector::CollectGarbage() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
+ Heap::MarkMapPointersAsEncoded(true);
UpdatePointers();
+ Heap::MarkMapPointersAsEncoded(false);
+ PcToCodeCache::FlushPcToCodeCache();
RelocateObjects();
} else {
SweepSpaces();
+ PcToCodeCache::FlushPcToCodeCache();
}
Finish();
@@ -252,6 +257,14 @@ class StaticMarkingVisitor : public StaticVisitorBase {
table_.GetVisitor(map)(map, obj);
}
+ static void EnableCodeFlushing(bool enabled) {
+ if (enabled) {
+ table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+ } else {
+ table_.Register(kVisitJSFunction, &VisitJSFunction);
+ }
+ }
+
static void Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticMarkingVisitor,
@@ -289,6 +302,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
table_.Register(kVisitCode, &VisitCode);
+ table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+
table_.Register(kVisitPropertyCell,
&FixedBodyVisitor<StaticMarkingVisitor,
JSGlobalPropertyCell::BodyDescriptor,
@@ -405,6 +420,160 @@ class StaticMarkingVisitor : public StaticVisitorBase {
reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
}
+ // Code flushing support.
+
+ // How many collections a newly compiled code object will survive before
+ // being flushed.
+ static const int kCodeAgeThreshold = 5;
+
+ inline static bool HasSourceCode(SharedFunctionInfo* info) {
+ Object* undefined = Heap::raw_unchecked_undefined_value();
+ return (info->script() != undefined) &&
+ (reinterpret_cast<Script*>(info->script())->source() != undefined);
+ }
+
+
+ inline static bool IsCompiled(JSFunction* function) {
+ return
+ function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ }
+
+
+ inline static bool IsCompiled(SharedFunctionInfo* function) {
+ return
+ function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ }
+
+
+ static void FlushCodeForFunction(JSFunction* function) {
+ SharedFunctionInfo* shared_info = function->unchecked_shared();
+
+ if (shared_info->IsMarked()) return;
+
+ // Special handling if the function and shared info objects
+ // have different code objects.
+ if (function->unchecked_code() != shared_info->unchecked_code()) {
+ // If the shared function has been flushed but the function has not,
+ // we flush the function if possible.
+ if (!IsCompiled(shared_info) &&
+ IsCompiled(function) &&
+ !function->unchecked_code()->IsMarked()) {
+ function->set_code(shared_info->unchecked_code());
+ }
+ return;
+ }
+
+ // Code is either on stack or in compilation cache.
+ if (shared_info->unchecked_code()->IsMarked()) {
+ shared_info->set_code_age(0);
+ return;
+ }
+
+ // The function must be compiled and have the source code available
+ // to be able to recompile it in case we need the function again.
+ if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;
+
+ // We never flush code for Api functions.
+ Object* function_data = shared_info->function_data();
+ if (function_data->IsHeapObject() &&
+ (SafeMap(function_data)->instance_type() ==
+ FUNCTION_TEMPLATE_INFO_TYPE)) {
+ return;
+ }
+
+ // Only flush code for functions.
+ if (shared_info->code()->kind() != Code::FUNCTION) return;
+
+ // Function must be lazy compilable.
+ if (!shared_info->allows_lazy_compilation()) return;
+
+ // If this is a full script wrapped in a function we do not flush the code.
+ if (shared_info->is_toplevel()) return;
+
+ // Age this shared function info.
+ if (shared_info->code_age() < kCodeAgeThreshold) {
+ shared_info->set_code_age(shared_info->code_age() + 1);
+ return;
+ }
+
+ // Compute the lazy compilable version of the code.
+ Code* code = Builtins::builtin(Builtins::LazyCompile);
+ shared_info->set_code(code);
+ function->set_code(code);
+ }
+
+
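
Reviewer note: a condensed sketch of the flushing decision above, with toy types. The predicates and the aging step mirror the commented checks in FlushCodeForFunction, not V8 internals:

    struct ToyShared {
      bool marked, compiled, has_source, is_api, is_function_code,
           allows_lazy, is_toplevel;
      int code_age;
    };

    // Returns true when the compiled code should be dropped back to the
    // lazy-compile stub; otherwise ages the shared info for a later cycle.
    bool ShouldFlush(ToyShared* s, int kCodeAgeThreshold = 5) {
      if (s->marked) return false;                  // code is live this cycle
      if (!(s->compiled && s->has_source)) return false;
      if (s->is_api || !s->is_function_code) return false;
      if (!s->allows_lazy || s->is_toplevel) return false;
      if (s->code_age < kCodeAgeThreshold) {        // not old enough yet
        s->code_age++;
        return false;
      }
      return true;
    }
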
+ static inline Map* SafeMap(Object* obj) {
+ MapWord map_word = HeapObject::cast(obj)->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return map_word.ToMap();
+ }
+
+
+ static inline bool IsJSBuiltinsObject(Object* obj) {
+ return obj->IsHeapObject() &&
+ (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
+ }
+
+
+ static inline bool IsValidNotBuiltinContext(Object* ctx) {
+ if (!ctx->IsHeapObject()) return false;
+
+ Map* map = SafeMap(ctx);
+ if (!(map == Heap::raw_unchecked_context_map() ||
+ map == Heap::raw_unchecked_catch_context_map() ||
+ map == Heap::raw_unchecked_global_context_map())) {
+ return false;
+ }
+
+ Context* context = reinterpret_cast<Context*>(ctx);
+
+ if (IsJSBuiltinsObject(context->global())) {
+ return false;
+ }
+
+ return true;
+ }
+
+
+ static void VisitCodeEntry(Address entry_address) {
+ Object* code = Code::GetObjectFromEntryAddress(entry_address);
+ Object* old_code = code;
+ VisitPointer(&code);
+ if (code != old_code) {
+ Memory::Address_at(entry_address) =
+ reinterpret_cast<Code*>(code)->entry();
+ }
+ }
+
+
+ static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+ JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
+ // The function must have a valid context and not be a builtin.
+ if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
+ FlushCodeForFunction(jsfunction);
+ }
+ VisitJSFunction(map, object);
+ }
+
+
+ static void VisitJSFunction(Map* map, HeapObject* object) {
+#define SLOT_ADDR(obj, offset) \
+ reinterpret_cast<Object**>((obj)->address() + offset)
+
+ VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+ SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+
+ VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+
+ VisitPointers(SLOT_ADDR(object,
+ JSFunction::kCodeEntryOffset + kPointerSize),
+ SLOT_ADDR(object, JSFunction::kSize));
+#undef SLOT_ADDR
+ }
+
+
typedef void (*Callback)(Map* map, HeapObject* object);
static VisitorDispatchTable<Callback> table_;
@@ -435,6 +604,66 @@ class MarkingVisitor : public ObjectVisitor {
};
+class CodeMarkingVisitor : public ThreadVisitor {
+ public:
+ void VisitThread(ThreadLocalTop* top) {
+ for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+ MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
+ }
+ }
+};
+
+
+class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) VisitPointer(p);
+ }
+
+ void VisitPointer(Object** slot) {
+ Object* obj = *slot;
+ if (obj->IsHeapObject()) {
+ MarkCompactCollector::MarkObject(HeapObject::cast(obj));
+ }
+ }
+};
+
+
+void MarkCompactCollector::PrepareForCodeFlushing() {
+ if (!FLAG_flush_code) {
+ StaticMarkingVisitor::EnableCodeFlushing(false);
+ return;
+ }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (Debug::IsLoaded() || Debug::has_break_points()) {
+ StaticMarkingVisitor::EnableCodeFlushing(false);
+ return;
+ }
+#endif
+ StaticMarkingVisitor::EnableCodeFlushing(true);
+
+ // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
+ // relies on it being marked before any other descriptor array.
+ MarkObject(Heap::raw_unchecked_empty_descriptor_array());
+
+ // Make sure we are not referencing the code from the stack.
+ for (StackFrameIterator it; !it.done(); it.Advance()) {
+ MarkObject(it.frame()->unchecked_code());
+ }
+
+ // Iterate the archived stacks in all threads to check if
+ // the code is referenced.
+ CodeMarkingVisitor code_marking_visitor;
+ ThreadManager::IterateArchivedThreads(&code_marking_visitor);
+
+ SharedFunctionInfoMarkingVisitor visitor;
+ CompilationCache::IterateFunctions(&visitor);
+
+ ProcessMarkingStack();
+}
+
+
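
Reviewer note: a minimal sketch of the safety rule PrepareForCodeFlushing implements. Before any aging decision, every code object referenced from a stack frame (current or archived thread) is marked so FlushCodeForFunction sees it as live; all names here are illustrative:

    #include <vector>

    struct ToyCode { bool marked = false; };
    struct ToyFrame { ToyCode* code; };

    void MarkStackCode(const std::vector<ToyFrame>& frames) {
      for (const ToyFrame& f : frames) f.code->marked = true;  // keep on-stack code
    }
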
// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
public:
@@ -793,6 +1022,8 @@ void MarkCompactCollector::MarkLiveObjects() {
ASSERT(!marking_stack.overflowed());
+ PrepareForCodeFlushing();
+
RootMarkingVisitor root_visitor;
MarkRoots(&root_visitor);
@@ -962,8 +1193,6 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
-static const uint32_t kSingleFreeEncoding = 0;
-static const uint32_t kMultiFreeEncoding = 1;
// Encode a free region, defined by the given start address and size, in the
@@ -971,10 +1200,10 @@ static const uint32_t kMultiFreeEncoding = 1;
void EncodeFreeRegion(Address free_start, int free_size) {
ASSERT(free_size >= kIntSize);
if (free_size == kIntSize) {
- Memory::uint32_at(free_start) = kSingleFreeEncoding;
+ Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
} else {
ASSERT(free_size >= 2 * kIntSize);
- Memory::uint32_at(free_start) = kMultiFreeEncoding;
+ Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Memory::int_at(free_start + kIntSize) = free_size;
}
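
Reviewer note: a plain C++ model of the free-region encoding above. A one-word hole is tagged with kSingleFreeEncoding; larger holes store kMultiFreeEncoding followed by the byte size in the next word, matching the constants now exposed on MarkCompactCollector:

    #include <cassert>
    #include <cstdint>

    const uint32_t kSingleFreeEncoding = 0;
    const uint32_t kMultiFreeEncoding = 1;
    const int kIntSize = 4;

    void EncodeFreeRegion(uint32_t* free_start, int free_size) {
      assert(free_size >= kIntSize);
      if (free_size == kIntSize) {
        free_start[0] = kSingleFreeEncoding;          // single-word hole
      } else {
        assert(free_size >= 2 * kIntSize);
        free_start[0] = kMultiFreeEncoding;           // marker word
        free_start[1] = static_cast<uint32_t>(free_size);  // size word
      }
    }
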
@@ -1404,7 +1633,7 @@ static void SweepNewSpace(NewSpace* space) {
}
-static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+static void SweepSpace(PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find longest sequences
@@ -1445,10 +1674,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start,
- static_cast<int>(current - free_start),
- true,
- false);
+ space->DeallocateBlock(free_start,
+ static_cast<int>(current - free_start),
+ true);
is_previous_alive = true;
}
} else {
@@ -1478,7 +1706,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false, true);
+ space->DeallocateBlock(free_start, size_in_bytes, false);
}
}
} else {
@@ -1494,7 +1722,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
if (last_free_size > 0) {
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
- dealloc(last_free_start, last_free_size, true, true);
+ space->DeallocateBlock(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1525,7 +1753,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false, true);
+ space->DeallocateBlock(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}
@@ -1546,61 +1774,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
}
-void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Objects in map space are assumed to have size Map::kSize and a
- // valid map in their first word. Thus, we break the free block up into
- // chunks and free them separately.
- ASSERT(size_in_bytes % Map::kSize == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += Map::kSize) {
- Heap::map_space()->Free(a, add_to_freelist);
- }
-}
-
-
-void MarkCompactCollector::DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Free-list elements in cell space are assumed to have a fixed size.
- // We break the free block into chunks and add them to the free list
- // individually.
- int size = Heap::cell_space()->object_size_in_bytes();
- ASSERT(size_in_bytes % size == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += size) {
- Heap::cell_space()->Free(a, add_to_freelist);
- }
-}
-
-
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
@@ -1865,14 +2038,14 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
- SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
- SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
- SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+ SweepSpace(Heap::old_pointer_space());
+ SweepSpace(Heap::old_data_space());
+ SweepSpace(Heap::code_space());
+ SweepSpace(Heap::cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
SweepNewSpace(Heap::new_space());
}
- SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+ SweepSpace(Heap::map_space());
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index ad635867c3..72a6fa3b62 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -36,15 +36,6 @@ namespace internal {
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-// Callback function for non-live blocks in the old generation.
-// If add_to_freelist is false then just accounting stats are updated and
-// no attempt to add area to free list is made.
-typedef void (*DeallocateFunction)(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
-
// Forward declarations.
class RootMarkingVisitor;
class MarkingVisitor;
@@ -121,11 +112,17 @@ class MarkCompactCollector: public AllStatic {
#ifdef DEBUG
// Checks whether performing mark-compact collection.
static bool in_use() { return state_ > PREPARE_GC; }
+ static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj);
+ // Distinguishable invalid map encodings (for single word and multiple words)
+ // that indicate free regions.
+ static const uint32_t kSingleFreeEncoding = 0;
+ static const uint32_t kMultiFreeEncoding = 1;
+
private:
#ifdef DEBUG
enum CollectorState {
@@ -175,6 +172,10 @@ class MarkCompactCollector: public AllStatic {
friend class RootMarkingVisitor;
friend class MarkingVisitor;
friend class StaticMarkingVisitor;
+ friend class CodeMarkingVisitor;
+ friend class SharedFunctionInfoMarkingVisitor;
+
+ static void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
static void MarkLiveObjects();
@@ -319,33 +320,6 @@ class MarkCompactCollector: public AllStatic {
static int IterateLiveObjectsInRange(Address start, Address end,
HeapObjectCallback size_func);
- // Callback functions for deallocating non-live blocks in the old
- // generation.
- static void DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
diff --git a/deps/v8/src/memory.h b/deps/v8/src/memory.h
index 503492a4b5..27f32f7a2e 100644
--- a/deps/v8/src/memory.h
+++ b/deps/v8/src/memory.h
@@ -36,6 +36,10 @@ namespace internal {
class Memory {
public:
+ static uint8_t& uint8_at(Address addr) {
+ return *reinterpret_cast<uint8_t*>(addr);
+ }
+
static uint16_t& uint16_at(Address addr) {
return *reinterpret_cast<uint16_t*>(addr);
}
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 0375e8a173..f26c3b501d 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -831,11 +831,11 @@ function FormatSourcePosition(frame) {
}
var line = "";
var functionName = frame.getFunction().name;
- var methodName = frame.getMethodName();
var addPrefix = true;
var isConstructor = frame.isConstructor();
var isMethodCall = !(frame.isToplevel() || isConstructor);
if (isMethodCall) {
+ var methodName = frame.getMethodName();
line += frame.getTypeName() + ".";
if (functionName) {
line += functionName;
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 26fea25153..95329389e1 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -94,7 +94,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// args
// Clear the context before we push it when entering the JS frame.
- __ li(cp, Operand(0));
+ __ li(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
__ EnterInternalFrame();
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 3ad94e86d3..75e7a293f9 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -309,9 +309,6 @@ class CodeGenerator: public AstVisitor {
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
- static bool PatchInlineRuntimeEntry(Handle<String> name,
- const InlineRuntimeLUT& new_entry,
- InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index d340e4b598..6d49d7503c 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -30,6 +30,7 @@
#include "disassembler.h"
#include "disasm.h"
#include "jsregexp.h"
+#include "objects-visiting.h"
namespace v8 {
namespace internal {
@@ -540,7 +541,8 @@ void JSObject::JSObjectVerify() {
map()->NextFreePropertyIndex()));
}
ASSERT(map()->has_fast_elements() ==
- (elements()->map() == Heap::fixed_array_map()));
+ (elements()->map() == Heap::fixed_array_map() ||
+ elements()->map() == Heap::fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
}
@@ -639,13 +641,25 @@ void Map::MapPrint() {
void Map::MapVerify() {
ASSERT(!Heap::InNewSpace(this));
ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
- ASSERT(kPointerSize <= instance_size()
- && instance_size() < Heap::Capacity());
+ ASSERT(instance_size() == kVariableSizeSentinel ||
+ (kPointerSize <= instance_size() &&
+ instance_size() < Heap::Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
}
+void Map::NormalizedMapVerify() {
+ MapVerify();
+ ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
+ ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
+ ASSERT_EQ(0, pre_allocated_property_fields());
+ ASSERT_EQ(0, unused_property_fields());
+ ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
+ visitor_id());
+}
+
+
void CodeCache::CodeCachePrint() {
HeapObject::PrintHeader("CodeCache");
PrintF("\n - default_cache: ");
@@ -1361,6 +1375,21 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() {
}
+void NormalizedMapCache::NormalizedMapCacheVerify() {
+ FixedArray::cast(this)->Verify();
+ if (FLAG_enable_slow_asserts) {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ if (e->IsMap()) {
+ Map::cast(e)->NormalizedMapVerify();
+ } else {
+ ASSERT(e->IsUndefined());
+ }
+ }
+ }
+}
+
+
#endif // DEBUG
} } // namespace v8::internal
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 5e8022e51f..bac224f4e9 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -38,7 +38,10 @@
#include "objects.h"
#include "contexts.h"
#include "conversions-inl.h"
+#include "heap.h"
+#include "memory.h"
#include "property.h"
+#include "spaces.h"
namespace v8 {
namespace internal {
@@ -574,6 +577,18 @@ bool Object::IsJSFunctionResultCache() {
}
+bool Object::IsNormalizedMapCache() {
+ if (!IsFixedArray()) return false;
+ if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
+ return false;
+ }
+#ifdef DEBUG
+ reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+#endif
+ return true;
+}
+
+
bool Object::IsCompilationCacheTable() {
return IsHashTable();
}
@@ -1167,7 +1182,8 @@ HeapObject* JSObject::elements() {
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
ASSERT(map()->has_fast_elements() ==
- (value->map() == Heap::fixed_array_map()));
+ (value->map() == Heap::fixed_array_map() ||
+ value->map() == Heap::fixed_cow_array_map()));
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@@ -1397,6 +1413,7 @@ Object* FixedArray::get(int index) {
void FixedArray::set(int index, Smi* value) {
+ ASSERT(map() != Heap::fixed_cow_array_map());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1404,6 +1421,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
+ ASSERT(map() != Heap::fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1420,6 +1438,7 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
+ ASSERT(map() != Heap::fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1428,6 +1447,7 @@ void FixedArray::set(int index,
void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
+ ASSERT(array->map() != Heap::raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
ASSERT(!Heap::InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
@@ -1435,6 +1455,7 @@ void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
void FixedArray::set_undefined(int index) {
+ ASSERT(map() != Heap::fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(!Heap::InNewSpace(Heap::undefined_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
@@ -1443,6 +1464,7 @@ void FixedArray::set_undefined(int index) {
void FixedArray::set_null(int index) {
+ ASSERT(map() != Heap::fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(!Heap::InNewSpace(Heap::null_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
@@ -1450,12 +1472,27 @@ void FixedArray::set_null(int index) {
void FixedArray::set_the_hole(int index) {
+ ASSERT(map() != Heap::fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(!Heap::InNewSpace(Heap::the_hole_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::the_hole_value());
}
+void FixedArray::set_unchecked(int index, Smi* value) {
+ ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
+ int offset = kHeaderSize + index * kPointerSize;
+ WRITE_FIELD(this, offset, value);
+}
+
+
+void FixedArray::set_null_unchecked(int index) {
+ ASSERT(index >= 0 && index < this->length());
+ ASSERT(!Heap::InNewSpace(Heap::null_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
+}
+
+
Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
@@ -1637,6 +1674,7 @@ CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(JSFunctionResultCache)
+CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(MapCache)
@@ -2071,7 +2109,16 @@ void ExternalFloatArray::set(int index, float value) {
}
-INT_ACCESSORS(Map, visitor_id, kScavengerCallbackOffset)
+int Map::visitor_id() {
+ return READ_BYTE_FIELD(this, kVisitorIdOffset);
+}
+
+
+void Map::set_visitor_id(int id) {
+ ASSERT(0 <= id && id < 256);
+ WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
+}
+
int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
@@ -2089,20 +2136,28 @@ int Map::pre_allocated_property_fields() {
int HeapObject::SizeFromMap(Map* map) {
- InstanceType instance_type = map->instance_type();
+ int instance_size = map->instance_size();
+ if (instance_size != kVariableSizeSentinel) return instance_size;
+ // We can ignore the "symbol" bit because it is only set for symbols
+ // and implies a string type.
+ int instance_type = static_cast<int>(map->instance_type()) & ~kIsSymbolMask;
// Only inline the most frequent cases.
- if (instance_type == JS_OBJECT_TYPE ||
- (instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
- (kStringTag | kConsStringTag) ||
- instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
+ if (instance_type == ASCII_STRING_TYPE) {
+ return SeqAsciiString::SizeFor(
+ reinterpret_cast<SeqAsciiString*>(this)->length());
+ }
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
- // Otherwise do the general size computation.
- return SlowSizeFromMap(map);
+ if (instance_type == STRING_TYPE) {
+ return SeqTwoByteString::SizeFor(
+ reinterpret_cast<SeqTwoByteString*>(this)->length());
+ }
+ ASSERT(instance_type == CODE_TYPE);
+ return reinterpret_cast<Code*>(this)->CodeSize();
}
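
The rewritten SizeFromMap replaces the SlowSizeFromMap fallback (deleted
further down in objects.cc) with a single sentinel test: fixed-size objects
answer straight from the map, and only the few variable-size types compute a
size from their own length. A minimal sketch of that dispatch shape, using
made-up types and constants rather than V8's real ones:

    // Hypothetical stand-in types; only the dispatch shape mirrors the diff.
    const int kVariableSizeSentinel = 0;
    const int kHeaderSize = 8;
    const int kElementSize = 4;

    struct FakeMap {
      int instance_size;  // kVariableSizeSentinel for variable-size objects
    };

    struct FakeObject {
      FakeMap* map;
      int length;         // meaningful only for variable-size objects
    };

    int SizeFromMap(const FakeObject* obj) {
      int instance_size = obj->map->instance_size;
      if (instance_size != kVariableSizeSentinel) return instance_size;
      // Variable-size path: derive the size from the object's own length.
      return kHeaderSize + obj->length * kElementSize;
    }
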
@@ -2275,14 +2330,13 @@ int Code::arguments_count() {
}
-CodeStub::Major Code::major_key() {
+int Code::major_key() {
ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
- return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
- kStubMajorKeyOffset));
+ return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
}
-void Code::set_major_key(CodeStub::Major major) {
+void Code::set_major_key(int major) {
ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
ASSERT(0 <= major && major < 256);
WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
@@ -2380,6 +2434,12 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
}
+Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
+ return HeapObject::
+ FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
+}
+
+
Object* Map::prototype() {
return READ_FIELD(this, kPrototypeOffset);
}
@@ -2398,6 +2458,7 @@ Object* Map::GetFastElementsMap() {
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(true);
+ Counters::map_slow_to_fast_elements.Increment();
return new_map;
}
@@ -2408,6 +2469,7 @@ Object* Map::GetSlowElementsMap() {
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(false);
+ Counters::map_fast_to_slow_elements.Increment();
return new_map;
}
@@ -2539,6 +2601,7 @@ BOOL_ACCESSORS(SharedFunctionInfo,
allows_lazy_compilation,
kAllowLazyCompilation)
+
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
@@ -2638,6 +2701,11 @@ Code* SharedFunctionInfo::code() {
}
+Code* SharedFunctionInfo::unchecked_code() {
+ return reinterpret_cast<Code*>(READ_FIELD(this, kCodeOffset));
+}
+
+
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
@@ -2684,20 +2752,38 @@ int SharedFunctionInfo::custom_call_generator_id() {
}
+int SharedFunctionInfo::code_age() {
+ return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask;
+}
+
+
+void SharedFunctionInfo::set_code_age(int code_age) {
+ set_compiler_hints(compiler_hints() |
+ ((code_age & kCodeAgeMask) << kCodeAgeShift));
+}
+
+
bool JSFunction::IsBuiltin() {
return context()->global()->IsJSBuiltinsObject();
}
Code* JSFunction::code() {
- return Code::cast(READ_FIELD(this, kCodeOffset));
+ return Code::cast(unchecked_code());
+}
+
+
+Code* JSFunction::unchecked_code() {
+ return reinterpret_cast<Code*>(
+ Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
}
void JSFunction::set_code(Code* value) {
// Skip the write barrier because code is never in new space.
ASSERT(!Heap::InNewSpace(value));
- WRITE_FIELD(this, kCodeOffset, value);
+ Address entry = value->entry();
+ WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
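
With this change a JSFunction stores the raw address of its code's first
instruction at kCodeEntryOffset instead of a tagged Code pointer, so a call
needs one less indirection; GetObjectFromEntryAddress inverts the mapping by
subtracting the fixed header size. A rough round-trip sketch, with an assumed
header size in place of V8's actual Code::kHeaderSize:

    #include <assert.h>
    #include <stdint.h>

    const uintptr_t kAssumedHeaderSize = 32;  // illustrative, not V8's value

    // Address of the first instruction, given the code object's address.
    uintptr_t EntryFromObject(uintptr_t code_object) {
      return code_object + kAssumedHeaderSize;
    }

    // Inverse mapping, as in Code::GetObjectFromEntryAddress.
    uintptr_t ObjectFromEntry(uintptr_t entry) {
      return entry - kAssumedHeaderSize;
    }

    int main() {
      uintptr_t code = 0x1000;
      assert(ObjectFromEntry(EntryFromObject(code)) == code);
      return 0;
    }
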
@@ -2711,6 +2797,12 @@ Object* JSFunction::unchecked_context() {
}
+SharedFunctionInfo* JSFunction::unchecked_shared() {
+ return reinterpret_cast<SharedFunctionInfo*>(
+ READ_FIELD(this, kSharedFunctionInfoOffset));
+}
+
+
void JSFunction::set_context(Object* value) {
ASSERT(value == Heap::undefined_value() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
@@ -2867,7 +2959,7 @@ byte* Code::entry() {
bool Code::contains(byte* pc) {
return (instruction_start() <= pc) &&
- (pc < instruction_start() + instruction_size());
+ (pc <= instruction_start() + instruction_size());
}
@@ -2928,18 +3020,18 @@ void JSRegExp::SetDataAt(int index, Object* value) {
JSObject::ElementsKind JSObject::GetElementsKind() {
+ if (map()->has_fast_elements()) {
+ ASSERT(elements()->map() == Heap::fixed_array_map() ||
+ elements()->map() == Heap::fixed_cow_array_map());
+ return FAST_ELEMENTS;
+ }
HeapObject* array = elements();
if (array->IsFixedArray()) {
- // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
- if (array->map() == Heap::fixed_array_map()) {
- ASSERT(map()->has_fast_elements());
- return FAST_ELEMENTS;
- }
+ // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a
+ // FixedArray, but FAST_ELEMENTS is already handled above.
ASSERT(array->IsDictionary());
- ASSERT(!map()->has_fast_elements());
return DICTIONARY_ELEMENTS;
}
- ASSERT(!map()->has_fast_elements());
if (array->IsExternalArray()) {
switch (array->map()->instance_type()) {
case EXTERNAL_BYTE_ARRAY_TYPE:
@@ -3042,6 +3134,19 @@ bool JSObject::AllowsSetElementsLength() {
}
+Object* JSObject::EnsureWritableFastElements() {
+ ASSERT(HasFastElements());
+ FixedArray* elems = FixedArray::cast(elements());
+ if (elems->map() != Heap::fixed_cow_array_map()) return elems;
+ Object* writable_elems = Heap::CopyFixedArray(elems);
+ if (writable_elems->IsFailure()) return writable_elems;
+ FixedArray::cast(writable_elems)->set_map(Heap::fixed_array_map());
+ set_elements(FixedArray::cast(writable_elems));
+ Counters::cow_arrays_converted.Increment();
+ return writable_elems;
+}
+
+
StringDictionary* JSObject::property_dictionary() {
ASSERT(!HasFastProperties());
return StringDictionary::cast(properties());
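
The asserts added to the FixedArray setters above protect the new
copy-on-write elements: an array carrying fixed_cow_array_map may be shared
by several objects and must be copied before any write, which is exactly what
EnsureWritableFastElements does. The same discipline as a standalone sketch,
with standard shared ownership standing in for V8's map check:

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    class CowElements {
     public:
      explicit CowElements(std::vector<int> init)
          : store_(std::make_shared<std::vector<int>>(std::move(init))) {}

      int get(std::size_t i) const { return (*store_)[i]; }

      void set(std::size_t i, int v) {
        EnsureWritable();  // copy only if the backing store is shared
        (*store_)[i] = v;
      }

     private:
      void EnsureWritable() {
        if (store_.use_count() > 1) {
          store_ = std::make_shared<std::vector<int>>(*store_);  // the copy
        }
      }
      std::shared_ptr<std::vector<int>> store_;
    };
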
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 293c9bf8de..c35e02cc9c 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -101,7 +101,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_VALUE_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE:
- case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
@@ -109,6 +108,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
kVisitJSObjectGeneric,
instance_size);
+ case JS_FUNCTION_TYPE:
+ return kVisitJSFunction;
+
case HEAP_NUMBER_TYPE:
case PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 6280bac4ed..a6d6b12c69 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -100,11 +100,15 @@ class StaticVisitorBase : public AllStatic {
kVisitMap,
kVisitPropertyCell,
kVisitSharedFunctionInfo,
+ kVisitJSFunction,
kVisitorIdCount,
kMinObjectSizeInWords = 2
};
+ // Visitor ID should fit in one byte.
+ STATIC_ASSERT(kVisitorIdCount <= 256);
+
// Determine which specialized visitor should be used for given instance type
// and instance size.
static VisitorId GetVisitorId(int instance_type, int instance_size);
@@ -198,13 +202,16 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
- IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
+ BodyVisitorBase<StaticVisitor>::IteratePointers(
+ object, BodyDescriptor::kStartOffset, object_size);
return static_cast<ReturnType>(object_size);
}
template<int object_size>
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
- IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
+ ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
+ BodyVisitorBase<StaticVisitor>::IteratePointers(
+ object, BodyDescriptor::kStartOffset, object_size);
return static_cast<ReturnType>(object_size);
}
};
@@ -214,9 +221,8 @@ template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
- IteratePointers(object,
- BodyDescriptor::kStartOffset,
- BodyDescriptor::kEndOffset);
+ BodyVisitorBase<StaticVisitor>::IteratePointers(
+ object, BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
@@ -268,6 +274,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+ table_.Register(kVisitJSFunction,
+ &JSObjectVisitor::
+ template VisitSpecialized<JSFunction::kSize>);
+
table_.RegisterSpecializations<DataObjectVisitor,
kVisitDataObject,
kVisitDataObjectGeneric>();
@@ -275,8 +285,8 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
kVisitJSObject,
kVisitJSObjectGeneric>();
table_.RegisterSpecializations<StructVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
+ kVisitStruct,
+ kVisitStructGeneric>();
}
static inline int IterateBody(Map* map, HeapObject* obj) {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 5687a3a53f..ef5185145a 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -54,7 +54,8 @@ const int kGetterIndex = 0;
const int kSetterIndex = 1;
-static Object* CreateJSValue(JSFunction* constructor, Object* value) {
+MUST_USE_RESULT static Object* CreateJSValue(JSFunction* constructor,
+ Object* value) {
Object* result = Heap::AllocateJSObject(constructor);
if (result->IsFailure()) return result;
JSValue::cast(result)->set_value(value);
@@ -1024,38 +1025,6 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
}
-int HeapObject::SlowSizeFromMap(Map* map) {
- // Avoid calling functions such as FixedArray::cast during GC, which
- // read map pointer of this object again.
- InstanceType instance_type = map->instance_type();
- uint32_t type = static_cast<uint32_t>(instance_type);
-
- if (instance_type < FIRST_NONSTRING_TYPE
- && (StringShape(instance_type).IsSequential())) {
- if ((type & kStringEncodingMask) == kAsciiStringTag) {
- SeqAsciiString* seq_ascii_this = reinterpret_cast<SeqAsciiString*>(this);
- return seq_ascii_this->SeqAsciiStringSize(instance_type);
- } else {
- SeqTwoByteString* self = reinterpret_cast<SeqTwoByteString*>(this);
- return self->SeqTwoByteStringSize(instance_type);
- }
- }
-
- switch (instance_type) {
- case FIXED_ARRAY_TYPE:
- return FixedArray::BodyDescriptor::SizeOf(map, this);
- case BYTE_ARRAY_TYPE:
- return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
- case CODE_TYPE:
- return reinterpret_cast<Code*>(this)->CodeSize();
- case MAP_TYPE:
- return Map::kSize;
- default:
- return map->instance_size();
- }
-}
-
-
void HeapObject::Iterate(ObjectVisitor* v) {
// Handle header
IteratePointer(v, kMapOffset);
@@ -1098,12 +1067,15 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_VALUE_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE:
- case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case JS_FUNCTION_TYPE:
+ reinterpret_cast<JSFunction*>(this)
+ ->JSFunctionIterateBody(object_size, v);
+ break;
case ODDBALL_TYPE:
Oddball::BodyDescriptor::IterateBody(this, v);
break;
@@ -1148,11 +1120,6 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
}
-void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) {
- IteratePointers(v, HeapObject::kHeaderSize, object_size);
-}
-
-
Object* HeapNumber::HeapNumberToBoolean() {
// NaN, +0, and -0 should return the false object
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2132,6 +2099,121 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
}
+bool NormalizedMapCache::IsCacheable(JSObject* object) {
+ // Caching for global objects is not worth it (there are too few of them).
+ return !object->IsGlobalObject();
+}
+
+
+Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
+ Object* result;
+
+ Map* fast = obj->map();
+ if (!IsCacheable(obj)) {
+ result = fast->CopyNormalized(mode);
+ if (result->IsFailure()) return result;
+ } else {
+ int index = Hash(fast) % kEntries;
+ result = get(index);
+
+ if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ // Make sure that the new slow map has exactly the same hash as the
+ // original fast map. This way we can use hash to check if a slow map
+ // is already in the cache (see Contains method).
+ ASSERT(Hash(fast) == Hash(Map::cast(result)));
+ // The cached map should match newly created normalized map bit-by-bit.
+ Object* fresh = fast->CopyNormalized(mode);
+ if (!fresh->IsFailure()) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kSize) == 0);
+ }
+ }
+#endif
+ return result;
+ }
+
+ result = fast->CopyNormalized(mode);
+ if (result->IsFailure()) return result;
+ set(index, result);
+ }
+ Counters::normalized_maps.Increment();
+
+ return result;
+}
+
+
+bool NormalizedMapCache::Contains(Map* map) {
+ // If the map is present in the cache it can only be at one place:
+ // at the index calculated from the hash. We assume that a slow map has the
+ // same hash as the fast map it was generated from.
+ int index = Hash(map) % kEntries;
+ return get(index) == map;
+}
+
+
+void NormalizedMapCache::Clear() {
+ int entries = length();
+ for (int i = 0; i != entries; i++) {
+ set_undefined(i);
+ }
+}
+
+
+int NormalizedMapCache::Hash(Map* fast) {
+ // For performance reasons we only hash the 3 most variable fields of a map:
+ // constructor, prototype and bit_field2.
+
+ // Shift away the tag.
+ int hash = (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
+
+ // XOR-ing the prototype and constructor directly yields too many zero bits
+ // when the two pointers are close (which is fairly common).
+ // To avoid this we shift the prototype 4 bits relative to the constructor.
+ hash ^= (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
+
+ return hash ^ (hash >> 16) ^ fast->bit_field2();
+}
+
+
+bool NormalizedMapCache::CheckHit(Map* slow,
+ Map* fast,
+ PropertyNormalizationMode mode) {
+#ifdef DEBUG
+ slow->NormalizedMapVerify();
+#endif
+ return
+ slow->constructor() == fast->constructor() &&
+ slow->prototype() == fast->prototype() &&
+ slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
+ 0 :
+ fast->inobject_properties()) &&
+ slow->instance_type() == fast->instance_type() &&
+ slow->bit_field() == fast->bit_field() &&
+ slow->bit_field2() == fast->bit_field2();
+}
+
+
+Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
+ if (!HasFastProperties() &&
+ NormalizedMapCache::IsCacheable(this) &&
+ Top::context()->global_context()->normalized_map_cache()->
+ Contains(map())) {
+ // Replace the map with the identical copy that can be safely modified.
+ Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+ if (obj->IsFailure()) return obj;
+ Counters::normalized_maps.Increment();
+
+ set_map(Map::cast(obj));
+ }
+ return map()->UpdateCodeCache(name, code);
+}
+
+
Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int expected_additional_properties) {
if (!HasFastProperties()) return this;
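
NormalizedMapCache::Get above is a 64-entry direct-mapped cache: the hash
selects exactly one slot, a hit is confirmed by the structural comparison in
CheckHit, and a miss overwrites the slot with a freshly normalized map. The
hash itself can be restated in isolation (raw pointer values stand in for the
constructor and prototype fields):

    #include <stdint.h>

    // Standalone restatement of NormalizedMapCache::Hash. bit_field2 is a
    // byte of map flags.
    uint32_t NormalizedMapHash(uintptr_t constructor,
                               uintptr_t prototype,
                               uint8_t bit_field2) {
      // Shift away the heap-object tag bits.
      uint32_t hash = static_cast<uint32_t>(constructor) >> 2;
      // Shift the prototype 4 bits relative to the constructor so two
      // nearby pointers do not XOR down to mostly-zero bits.
      hash ^= static_cast<uint32_t>(prototype) << 2;
      return hash ^ (hash >> 16) ^ bit_field2;
    }

    // Slot selection, as in Get: int index = NormalizedMapHash(...) % 64;
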
@@ -2196,28 +2278,22 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int index = map()->instance_descriptors()->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index);
- // Allocate new map.
- obj = map()->CopyDropDescriptors();
+ obj = Top::context()->global_context()->
+ normalized_map_cache()->Get(this, mode);
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
- // Clear inobject properties if needed by adjusting the instance size and
- // putting in a filler object instead of the inobject properties.
- if (mode == CLEAR_INOBJECT_PROPERTIES && map()->inobject_properties() > 0) {
- int instance_size_delta = map()->inobject_properties() * kPointerSize;
- int new_instance_size = map()->instance_size() - instance_size_delta;
- new_map->set_inobject_properties(0);
- new_map->set_instance_size(new_instance_size);
- new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map));
- Heap::CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- }
- new_map->set_unused_property_fields(0);
-
// We have now successfully allocated all the necessary objects.
// Changes can now be made with the guarantee that all of them take effect.
+
+ // Resize the object in the heap if necessary.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = map()->instance_size() - new_instance_size;
+ ASSERT(instance_size_delta >= 0);
+ Heap::CreateFillerObjectAt(this->address() + new_instance_size,
+ instance_size_delta);
+
set_map(new_map);
- map()->set_instance_descriptors(Heap::empty_descriptor_array());
set_properties(dictionary);
@@ -2338,6 +2414,8 @@ Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
ASSERT(!HasPixelElements() && !HasExternalArrayElements());
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
+ Object* obj = EnsureWritableFastElements();
+ if (obj->IsFailure()) return obj;
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
@@ -2418,6 +2496,8 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
+ Object* obj = EnsureWritableFastElements();
+ if (obj->IsFailure()) return obj;
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
@@ -2601,7 +2681,8 @@ bool JSObject::ReferencesObject(Object* obj) {
Object* JSObject::PreventExtensions() {
// If there are fast elements we normalize.
if (HasFastElements()) {
- NormalizeElements();
+ Object* ok = NormalizeElements();
+ if (ok->IsFailure()) return ok;
}
// Make sure that we never go back to fast case.
element_dictionary()->set_requires_slow_elements();
@@ -3113,6 +3194,33 @@ Object* Map::CopyDropDescriptors() {
}
+Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+ int new_instance_size = instance_size();
+ if (mode == CLEAR_INOBJECT_PROPERTIES) {
+ new_instance_size -= inobject_properties() * kPointerSize;
+ }
+
+ Object* result = Heap::AllocateMap(instance_type(), new_instance_size);
+ if (result->IsFailure()) return result;
+
+ if (mode != CLEAR_INOBJECT_PROPERTIES) {
+ Map::cast(result)->set_inobject_properties(inobject_properties());
+ }
+
+ Map::cast(result)->set_prototype(prototype());
+ Map::cast(result)->set_constructor(constructor());
+
+ Map::cast(result)->set_bit_field(bit_field());
+ Map::cast(result)->set_bit_field2(bit_field2());
+
+#ifdef DEBUG
+ Map::cast(result)->NormalizedMapVerify();
+#endif
+
+ return result;
+}
+
+
Object* Map::CopyDropTransitions() {
Object* new_map = CopyDropDescriptors();
if (new_map->IsFailure()) return new_map;
@@ -4880,24 +4988,21 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
}
-static inline uint32_t HashField(uint32_t hash,
- bool is_array_index,
- int length = -1) {
- uint32_t result = (hash << String::kHashShift);
- if (is_array_index) {
- // For array indexes mix the length into the hash as an array index could
- // be zero.
- ASSERT(length > 0);
- ASSERT(length <= String::kMaxArrayIndexSize);
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
- result &= ~String::kIsNotArrayIndexMask;
- result |= length << String::kArrayIndexHashLengthShift;
- } else {
- result |= String::kIsNotArrayIndexMask;
- }
- return result;
+uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
+ // For array indexes mix the length into the hash as an array index could
+ // be zero.
+ ASSERT(length > 0);
+ ASSERT(length <= String::kMaxArrayIndexSize);
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+
+ value <<= String::kHashShift;
+ value |= length << String::kArrayIndexHashLengthShift;
+
+ ASSERT((value & String::kIsNotArrayIndexMask) == 0);
+ ASSERT((length > String::kMaxCachedArrayIndexLength) ||
+ (value & String::kContainsCachedArrayIndexMask) == 0);
+ return value;
}
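
A short illustration of why MakeArrayIndexHash mixes the length into the
field: the index value alone may be zero (for the string "0"), so the length
guarantees a non-zero, distinguishable hash field. The shift constants below
are placeholders, not String's real layout:

    #include <stdint.h>

    const int kAssumedHashShift = 2;     // placeholder flag-bit count
    const int kAssumedLengthShift = 26;  // placeholder length position

    uint32_t MakeArrayIndexHashSketch(uint32_t value, int length) {
      uint32_t field = value << kAssumedHashShift;  // value above the flags
      field |= static_cast<uint32_t>(length) << kAssumedLengthShift;
      return field;  // the "not an array index" flag bit stays clear
    }
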
@@ -4905,14 +5010,11 @@ uint32_t StringHasher::GetHashField() {
ASSERT(is_valid());
if (length_ <= String::kMaxHashCalcLength) {
if (is_array_index()) {
- return v8::internal::HashField(array_index(), true, length_);
- } else {
- return v8::internal::HashField(GetHash(), false);
+ return MakeArrayIndexHash(array_index(), length_);
}
- uint32_t payload = v8::internal::HashField(GetHash(), false);
- return payload;
+ return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
} else {
- return v8::internal::HashField(length_, false);
+ return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
}
}
@@ -5009,8 +5111,8 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
ASSERT(target->IsHeapObject());
if (!target->IsMarked()) {
ASSERT(target->IsMap());
- contents->set(i + 1, NullDescriptorDetails);
- contents->set_null(i);
+ contents->set_unchecked(i + 1, NullDescriptorDetails);
+ contents->set_null_unchecked(i);
ASSERT(target->prototype() == this ||
target->prototype() == real_prototype);
// Getter prototype() is read-only, set_prototype() has side effects.
@@ -5021,6 +5123,15 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
}
+void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
+ // Iterate over all fields in the body but take care in dealing with
+ // the code entry.
+ IteratePointers(v, kPropertiesOffset, kCodeEntryOffset);
+ v->VisitCodeEntry(this->address() + kCodeEntryOffset);
+ IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size);
+}
+
+
Object* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSObject());
@@ -5037,7 +5148,6 @@ Object* JSFunction::SetInstancePrototype(Object* value) {
}
-
Object* JSFunction::SetPrototype(Object* value) {
ASSERT(should_have_prototype());
Object* construct_prototype = value;
@@ -5095,7 +5205,7 @@ Object* Oddball::Initialize(const char* to_string, Object* to_number) {
bool SharedFunctionInfo::HasSourceCode() {
return !script()->IsUndefined() &&
- !Script::cast(script())->source()->IsUndefined();
+ !reinterpret_cast<Script*>(script())->source()->IsUndefined();
}
@@ -5265,6 +5375,16 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
}
+void ObjectVisitor::VisitCodeEntry(Address entry_address) {
+ Object* code = Code::GetObjectFromEntryAddress(entry_address);
+ Object* old_code = code;
+ VisitPointer(&code);
+ if (code != old_code) {
+ Memory::Address_at(entry_address) = reinterpret_cast<Code*>(code)->entry();
+ }
+}
+
+
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
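
VisitCodeEntry lets a moving collector relocate a Code object even though
JSFunction stores only an instruction address: the visitor re-derives the
object pointer, gives the GC a chance to move the object, and writes the new
entry address back only if it changed. Schematically, under the same assumed
header-size arithmetic as the earlier sketch:

    #include <stdint.h>

    const uintptr_t kAssumedHeaderSize = 32;  // illustrative constant

    // Stand-in for the GC callback that may relocate an object.
    typedef uintptr_t (*RelocateFn)(uintptr_t object);

    void VisitCodeEntrySketch(uintptr_t* entry_slot, RelocateFn relocate) {
      uintptr_t old_object = *entry_slot - kAssumedHeaderSize;
      uintptr_t new_object = relocate(old_object);  // GC may move it
      if (new_object != old_object) {
        *entry_slot = new_object + kAssumedHeaderSize;  // refresh the entry
      }
    }
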
@@ -5591,6 +5711,8 @@ Object* JSObject::SetElementsLength(Object* len) {
int old_capacity = FixedArray::cast(elements())->length();
if (value <= old_capacity) {
if (IsJSArray()) {
+ Object* obj = EnsureWritableFastElements();
+ if (obj->IsFailure()) return obj;
int old_length = FastD2I(JSArray::cast(this)->length()->Number());
// NOTE: We may be able to optimize this by removing the
// last part of the elements backing storage array and
@@ -6051,7 +6173,9 @@ Object* JSObject::SetElementWithCallback(Object* structure,
Object* JSObject::SetFastElement(uint32_t index, Object* value) {
ASSERT(HasFastElements());
- FixedArray* elms = FixedArray::cast(elements());
+ Object* elms_obj = EnsureWritableFastElements();
+ if (elms_obj->IsFailure()) return elms_obj;
+ FixedArray* elms = FixedArray::cast(elms_obj);
uint32_t elms_length = static_cast<uint32_t>(elms->length());
if (!IsJSArray() && (index >= elms_length || elms->get(index)->IsTheHole())) {
@@ -6095,6 +6219,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
return SetElement(index, value);
}
+
Object* JSObject::SetElement(uint32_t index, Object* value) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -7573,6 +7698,9 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
set_map(new_map);
set_elements(fast_elements);
+ } else {
+ Object* obj = EnsureWritableFastElements();
+ if (obj->IsFailure()) return obj;
}
ASSERT(HasFastElements());
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index d2f6d3559b..7f6538cf9c 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -29,7 +29,6 @@
#define V8_OBJECTS_H_
#include "builtins.h"
-#include "code-stubs.h"
#include "smart-pointer.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
@@ -201,6 +200,10 @@ enum PropertyNormalizationMode {
};
+// Instance size sentinel for objects of variable size.
+static const int kVariableSizeSentinel = 0;
+
+
// All Maps have a field instance_type containing a InstanceType.
// It describes the type of the instances.
//
@@ -230,19 +233,21 @@ enum PropertyNormalizationMode {
V(CONS_SYMBOL_TYPE) \
V(CONS_ASCII_SYMBOL_TYPE) \
V(EXTERNAL_SYMBOL_TYPE) \
+ V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
V(EXTERNAL_ASCII_SYMBOL_TYPE) \
V(STRING_TYPE) \
V(ASCII_STRING_TYPE) \
V(CONS_STRING_TYPE) \
V(CONS_ASCII_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(EXTERNAL_ASCII_STRING_TYPE) \
V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
\
V(MAP_TYPE) \
V(CODE_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
V(ODDBALL_TYPE) \
+ V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
V(PROXY_TYPE) \
@@ -260,11 +265,9 @@ enum PropertyNormalizationMode {
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
- V(FIXED_ARRAY_TYPE) \
V(ACCESSOR_INFO_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
V(CALL_HANDLER_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
@@ -273,6 +276,9 @@ enum PropertyNormalizationMode {
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
\
+ V(FIXED_ARRAY_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ \
V(JS_VALUE_TYPE) \
V(JS_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
@@ -301,11 +307,11 @@ enum PropertyNormalizationMode {
// iterate over them.
#define STRING_TYPE_LIST(V) \
V(SYMBOL_TYPE, \
- SeqTwoByteString::kAlignedSize, \
+ kVariableSizeSentinel, \
symbol, \
Symbol) \
V(ASCII_SYMBOL_TYPE, \
- SeqAsciiString::kAlignedSize, \
+ kVariableSizeSentinel, \
ascii_symbol, \
AsciiSymbol) \
V(CONS_SYMBOL_TYPE, \
@@ -329,11 +335,11 @@ enum PropertyNormalizationMode {
external_ascii_symbol, \
ExternalAsciiSymbol) \
V(STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
+ kVariableSizeSentinel, \
string, \
String) \
V(ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
+ kVariableSizeSentinel, \
ascii_string, \
AsciiString) \
V(CONS_STRING_TYPE, \
@@ -355,7 +361,7 @@ enum PropertyNormalizationMode {
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
- ExternalAsciiString) \
+ ExternalAsciiString)
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -631,6 +637,7 @@ class Object BASE_EMBEDDED {
inline bool IsDictionary();
inline bool IsSymbolTable();
inline bool IsJSFunctionResultCache();
+ inline bool IsNormalizedMapCache();
inline bool IsCompilationCacheTable();
inline bool IsCodeCacheHashTable();
inline bool IsMapCache();
@@ -1015,10 +1022,6 @@ class HeapObject: public Object {
// object, and so is safe to call while the map pointer is modified.
void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
- // This method only applies to struct objects. Iterates over all the fields
- // of this struct.
- void IterateStructBody(int object_size, ObjectVisitor* v);
-
// Returns the heap object's size in bytes
inline int Size();
@@ -1097,10 +1100,6 @@ class HeapObject: public Object {
// as above, for the single element at "offset"
inline void IteratePointer(ObjectVisitor* v, int offset);
- // Computes the object size from the map.
- // Should only be used from SizeFromMap.
- int SlowSizeFromMap(Map* map);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
};
@@ -1211,7 +1210,9 @@ class JSObject: public HeapObject {
public:
enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
enum ElementsKind {
+ // The only "fast" kind.
FAST_ELEMENTS,
+ // All the kinds below are "slow".
DICTIONARY_ELEMENTS,
PIXEL_ELEMENTS,
EXTERNAL_BYTE_ELEMENTS,
@@ -1232,8 +1233,21 @@ class JSObject: public HeapObject {
inline StringDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
- // elements is a FixedArray in the fast case, a Dictionary in the slow
- // case, and a PixelArray or ExternalArray in special cases.
+ //
+ // Elements can be in two general modes: fast and slow. Each mode
+ // corresponds to a set of object representations of elements that
+ // have something in common.
+ //
+ // In the fast mode elements is a FixedArray and so each element can
+ // be quickly accessed. This fact is used in the generated code. The
+ // elements array can have one of the two maps in this mode:
+ // fixed_array_map or fixed_cow_array_map (for copy-on-write
+ // arrays). In the latter case the elements array may be shared by a
+ // few objects and so before writing to any element the array must
+ // be copied. Use EnsureWritableFastElements in this case.
+ //
+ // In the slow mode elements is either a NumberDictionary or a
+ // PixelArray or an ExternalArray.
DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
inline Object* ResetElements();
@@ -1251,6 +1265,8 @@ class JSObject: public HeapObject {
inline bool HasExternalFloatElements();
inline bool AllowsSetElementsLength();
inline NumberDictionary* element_dictionary(); // Gets slow elements.
+ // Requires: this->HasFastElements().
+ inline Object* EnsureWritableFastElements();
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
@@ -1258,7 +1274,7 @@ class JSObject: public HeapObject {
Object* PrepareElementsForSort(uint32_t limit);
// As PrepareElementsForSort, but only on objects where elements is
// a dictionary, and it will stay a dictionary.
- Object* PrepareSlowElementsForSort(uint32_t limit);
+ MUST_USE_RESULT Object* PrepareSlowElementsForSort(uint32_t limit);
Object* SetProperty(String* key,
Object* value,
@@ -1296,12 +1312,13 @@ class JSObject: public HeapObject {
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- Object* SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details);
+ MUST_USE_RESULT Object* SetNormalizedProperty(String* name,
+ Object* value,
+ PropertyDetails details);
// Deletes the named property in a normalized object.
- Object* DeleteNormalizedProperty(String* name, DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteNormalizedProperty(String* name,
+ DeleteMode mode);
// Returns the class name ([[Class]] property in the specification).
String* class_name();
@@ -1319,11 +1336,13 @@ class JSObject: public HeapObject {
String* name);
PropertyAttributes GetLocalPropertyAttribute(String* name);
- Object* DefineAccessor(String* name, bool is_getter, JSFunction* fun,
- PropertyAttributes attributes);
+ MUST_USE_RESULT Object* DefineAccessor(String* name,
+ bool is_getter,
+ JSFunction* fun,
+ PropertyAttributes attributes);
Object* LookupAccessor(String* name, bool is_getter);
- Object* DefineAccessor(AccessorInfo* info);
+ MUST_USE_RESULT Object* DefineAccessor(AccessorInfo* info);
// Used from Object::GetProperty().
Object* GetPropertyWithFailedAccessCheck(Object* receiver,
@@ -1374,8 +1393,8 @@ class JSObject: public HeapObject {
inline Object* GetHiddenPropertiesObject();
inline Object* SetHiddenPropertiesObject(Object* hidden_obj);
- Object* DeleteProperty(String* name, DeleteMode mode);
- Object* DeleteElement(uint32_t index, DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteProperty(String* name, DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteElement(uint32_t index, DeleteMode mode);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1403,19 +1422,20 @@ class JSObject: public HeapObject {
bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
- Object* SetFastElement(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* SetFastElement(uint32_t index, Object* value);
// Set the index'th array element.
// A Failure object is returned if GC is needed.
- Object* SetElement(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* SetElement(uint32_t index, Object* value);
// Returns the index'th element.
// The undefined object if index is out of bounds.
Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
- Object* SetFastElementsCapacityAndLength(int capacity, int length);
- Object* SetSlowElements(Object* length);
+ MUST_USE_RESULT Object* SetFastElementsCapacityAndLength(int capacity,
+ int length);
+ MUST_USE_RESULT Object* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
// objects.
@@ -1428,7 +1448,7 @@ class JSObject: public HeapObject {
bool HasRealNamedCallbackProperty(String* key);
// Initializes the array to a certain length
- Object* SetElementsLength(Object* length);
+ MUST_USE_RESULT Object* SetElementsLength(Object* length);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -1535,6 +1555,8 @@ class JSObject: public HeapObject {
int expected_additional_properties);
Object* NormalizeElements();
+ Object* UpdateMapCodeCache(String* name, Code* code);
+
// Transform slow named properties to fast variants.
// Returns failure if allocation failed.
Object* TransformToFastProperties(int unused_property_fields);
@@ -1563,7 +1585,7 @@ class JSObject: public HeapObject {
static inline JSObject* cast(Object* obj);
// Disallow further properties from being added to the object.
- Object* PreventExtensions();
+ MUST_USE_RESULT Object* PreventExtensions();
// Dispatched behavior.
@@ -1636,16 +1658,20 @@ class JSObject: public HeapObject {
uint32_t index,
Object* value,
JSObject* holder);
- Object* SetElementWithInterceptor(uint32_t index, Object* value);
- Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* SetElementWithInterceptor(uint32_t index,
+ Object* value);
+ MUST_USE_RESULT Object* SetElementWithoutInterceptor(uint32_t index,
+ Object* value);
Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
- Object* DeletePropertyPostInterceptor(String* name, DeleteMode mode);
- Object* DeletePropertyWithInterceptor(String* name);
+ MUST_USE_RESULT Object* DeletePropertyPostInterceptor(String* name,
+ DeleteMode mode);
+ MUST_USE_RESULT Object* DeletePropertyWithInterceptor(String* name);
- Object* DeleteElementPostInterceptor(uint32_t index, DeleteMode mode);
- Object* DeleteElementWithInterceptor(uint32_t index);
+ MUST_USE_RESULT Object* DeleteElementPostInterceptor(uint32_t index,
+ DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteElementWithInterceptor(uint32_t index);
PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
String* name,
@@ -1667,13 +1693,14 @@ class JSObject: public HeapObject {
bool HasDenseElements();
bool CanSetCallback(String* name);
- Object* SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- Object* SetPropertyCallback(String* name,
- Object* structure,
- PropertyAttributes attributes);
- Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
+ MUST_USE_RESULT Object* SetElementCallback(uint32_t index,
+ Object* structure,
+ PropertyAttributes attributes);
+ MUST_USE_RESULT Object* SetPropertyCallback(String* name,
+ Object* structure,
+ PropertyAttributes attributes);
+ MUST_USE_RESULT Object* DefineGetterSetter(String* name,
+ PropertyAttributes attributes);
void LookupInDescriptor(String* name, LookupResult* result);
@@ -1703,18 +1730,22 @@ class FixedArray: public HeapObject {
inline void set_null(int index);
inline void set_the_hole(int index);
+ // Setters with fewer debug checks for the GC to use.
+ inline void set_unchecked(int index, Smi* value);
+ inline void set_null_unchecked(int index);
+
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
// Copy operations.
inline Object* Copy();
- Object* CopySize(int new_length);
+ MUST_USE_RESULT Object* CopySize(int new_length);
// Add the elements of a JSArray to this FixedArray.
- Object* AddKeysFromJSArray(JSArray* array);
+ MUST_USE_RESULT Object* AddKeysFromJSArray(JSArray* array);
// Compute the union of this and other.
- Object* UnionOfKeys(FixedArray* other);
+ MUST_USE_RESULT Object* UnionOfKeys(FixedArray* other);
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -1853,11 +1884,12 @@ class DescriptorArray: public FixedArray {
// or null), its enumeration index is kept as is.
// If adding a real property, map transitions must be removed. If adding
// a transition, they must not be removed. All null descriptors are removed.
- Object* CopyInsert(Descriptor* descriptor, TransitionFlag transition_flag);
+ MUST_USE_RESULT Object* CopyInsert(Descriptor* descriptor,
+ TransitionFlag transition_flag);
// Remove all transitions. Return a copy of the array with all transitions
// removed, or a Failure object if the new array could not be allocated.
- Object* RemoveTransitions();
+ MUST_USE_RESULT Object* RemoveTransitions();
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
@@ -1885,7 +1917,7 @@ class DescriptorArray: public FixedArray {
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- static Object* Allocate(int number_of_descriptors);
+ MUST_USE_RESULT static Object* Allocate(int number_of_descriptors);
// Casting.
static inline DescriptorArray* cast(Object* obj);
@@ -2025,8 +2057,9 @@ class HashTable: public FixedArray {
}
// Returns a new HashTable object. Might return Failure.
- static Object* Allocate(int at_least_space_for,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* Allocate(
+ int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED);
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
@@ -2120,7 +2153,7 @@ class HashTable: public FixedArray {
}
// Ensure enough space for n additional elements.
- Object* EnsureCapacity(int n, Key key);
+ MUST_USE_RESULT Object* EnsureCapacity(int n, Key key);
};
@@ -2136,7 +2169,7 @@ class HashTableKey {
virtual uint32_t HashForObject(Object* key) = 0;
// Returns the key object for storing into the hash table.
// If allocations fails a failure object is returned.
- virtual Object* AsObject() = 0;
+ MUST_USE_RESULT virtual Object* AsObject() = 0;
// Required.
virtual ~HashTableKey() {}
};
@@ -2152,7 +2185,7 @@ class SymbolTableShape {
static uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -2202,7 +2235,7 @@ class MapCacheShape {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -2290,7 +2323,7 @@ class Dictionary: public HashTable<Shape, Key> {
}
// Returns a new array for dictionary usage. Might return Failure.
- static Object* Allocate(int at_least_space_for);
+ MUST_USE_RESULT static Object* Allocate(int at_least_space_for);
// Ensure enough space for n additional elements.
Object* EnsureCapacity(int n, Key key);
@@ -2332,7 +2365,7 @@ class StringDictionaryShape {
static inline bool IsMatch(String* key, Object* other);
static inline uint32_t Hash(String* key);
static inline uint32_t HashForObject(String* key, Object* object);
- static inline Object* AsObject(String* key);
+ MUST_USE_RESULT static inline Object* AsObject(String* key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = true;
@@ -2364,7 +2397,7 @@ class NumberDictionaryShape {
static inline bool IsMatch(uint32_t key, Object* other);
static inline uint32_t Hash(uint32_t key);
static inline uint32_t HashForObject(uint32_t key, Object* object);
- static inline Object* AsObject(uint32_t key);
+ MUST_USE_RESULT static inline Object* AsObject(uint32_t key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = false;
@@ -2445,6 +2478,35 @@ class JSFunctionResultCache: public FixedArray {
};
+// The cache for maps used by normalized (dictionary mode) objects.
+// Such maps do not have property descriptors, so a typical program
+// needs only a very limited number of distinct normalized maps.
+class NormalizedMapCache: public FixedArray {
+ public:
+ static const int kEntries = 64;
+
+ static bool IsCacheable(JSObject* object);
+
+ Object* Get(JSObject* object, PropertyNormalizationMode mode);
+
+ bool Contains(Map* map);
+
+ void Clear();
+
+ // Casting
+ static inline NormalizedMapCache* cast(Object* obj);
+
+#ifdef DEBUG
+ void NormalizedMapCacheVerify();
+#endif
+
+ private:
+ static int Hash(Map* fast);
+
+ static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode);
+};
+
+
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
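
Shape-wise NormalizedMapCache is a fixed-size, direct-mapped cache: kEntries
slots, one candidate slot per key, and misses overwrite. The same structure
as a generic container (illustrative code, not part of the patch):

    #include <array>
    #include <cstddef>
    #include <functional>

    template <typename Key, typename Value, std::size_t kEntries = 64>
    class DirectMappedCache {
     public:
      // Returns the cached value or nullptr; a hit needs an exact key match.
      Value* Get(const Key& key) {
        Entry& e = slots_[std::hash<Key>()(key) % kEntries];
        return (e.valid && e.key == key) ? &e.value : nullptr;
      }
      // A miss simply overwrites whatever occupied the slot.
      void Put(const Key& key, const Value& value) {
        Entry& e = slots_[std::hash<Key>()(key) % kEntries];
        e.valid = true;
        e.key = key;
        e.value = value;
      }

     private:
      struct Entry {
        Entry() : valid(false), key(), value() {}
        bool valid;
        Key key;
        Value value;
      };
      std::array<Entry, kEntries> slots_;
    };
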
@@ -2762,7 +2824,12 @@ class Code: public HeapObject {
public:
// Opaque data type for encapsulating code flags like kind, inline
// cache state, and arguments count.
- enum Flags { };
+ // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that
+ // the enumeration type has the correct value range (see Issue 830 for more details).
+ enum Flags {
+ FLAGS_MIN_VALUE = kMinInt,
+ FLAGS_MAX_VALUE = kMaxInt
+ };
enum Kind {
FUNCTION,
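
The FLAGS_MIN_VALUE/FLAGS_MAX_VALUE trick pins down the enum's value range:
C++ lets an implementation base an enum on a type just wide enough for its
enumerators, so without these two members casting an arbitrary packed flags
word to Flags could fall outside the enum's range. A standalone version,
using <climits> in place of V8's kMinInt/kMaxInt:

    #include <climits>

    // Spanning the full int range makes every int a valid Flags value, so
    // packed flag words round-trip safely through the enum type.
    enum Flags {
      FLAGS_MIN_VALUE = INT_MIN,
      FLAGS_MAX_VALUE = INT_MAX
    };

    inline Flags PackFlags(int bits) { return static_cast<Flags>(bits); }
    inline int UnpackFlags(Flags f) { return static_cast<int>(f); }
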
@@ -2829,8 +2896,8 @@ class Code: public HeapObject {
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
- inline CodeStub::Major major_key();
- inline void set_major_key(CodeStub::Major major);
+ inline int major_key();
+ inline void set_major_key(int major);
// Flags operations.
static inline Flags ComputeFlags(Kind kind,
@@ -2858,6 +2925,9 @@ class Code: public HeapObject {
// Convert a target address into a code object.
static inline Code* GetCodeFromTargetAddress(Address address);
+ // Convert an entry address into an object.
+ static inline Object* GetObjectFromEntryAddress(Address location_of_address);
+
// Returns the address of the first instruction.
inline byte* instruction_start();
@@ -2964,6 +3034,8 @@ class Code: public HeapObject {
class Map: public HeapObject {
public:
// Instance size.
+ // Size in bytes or kVariableSizeSentinel if instances do not have
+ // a fixed size.
inline int instance_size();
inline void set_instance_size(int value);
@@ -3061,7 +3133,8 @@ class Map: public HeapObject {
inline bool is_extensible();
// Tells whether the instance has fast elements.
- void set_has_fast_elements(bool value) {
+ // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
+ inline void set_has_fast_elements(bool value) {
if (value) {
set_bit_field2(bit_field2() | (1 << kHasFastElements));
} else {
@@ -3069,7 +3142,7 @@ class Map: public HeapObject {
}
}
- bool has_fast_elements() {
+ inline bool has_fast_elements() {
return ((1 << kHasFastElements) & bit_field2()) != 0;
}
@@ -3090,11 +3163,13 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
- Object* CopyDropDescriptors();
+ MUST_USE_RESULT Object* CopyDropDescriptors();
+
+ MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
- Object* CopyDropTransitions();
+ MUST_USE_RESULT Object* CopyDropTransitions();
// Returns this map if it has the fast elements bit set, otherwise
// returns a copy of the map, with all transitions dropped from the
@@ -3127,7 +3202,7 @@ class Map: public HeapObject {
inline void ClearCodeCache();
// Update code cache.
- Object* UpdateCodeCache(String* name, Code* code);
+ MUST_USE_RESULT Object* UpdateCodeCache(String* name, Code* code);
// Returns the found code or undefined if absent.
Object* FindInCodeCache(String* name, Code::Flags flags);
@@ -3154,6 +3229,7 @@ class Map: public HeapObject {
#ifdef DEBUG
void MapPrint();
void MapVerify();
+ void NormalizedMapVerify();
#endif
inline int visitor_id();
@@ -3169,8 +3245,7 @@ class Map: public HeapObject {
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kScavengerCallbackOffset = kCodeCacheOffset + kPointerSize;
- static const int kPadStart = kScavengerCallbackOffset + kPointerSize;
+ static const int kPadStart = kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
@@ -3187,7 +3262,8 @@ class Map: public HeapObject {
static const int kPreAllocatedPropertyFieldsByte = 2;
static const int kPreAllocatedPropertyFieldsOffset =
kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
- // The byte at position 3 is not in use at the moment.
+ static const int kVisitorIdByte = 3;
+ static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
// Byte offsets within kInstanceAttributesOffset attributes.
static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
@@ -3344,6 +3420,8 @@ class SharedFunctionInfo: public HeapObject {
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
+ inline Code* unchecked_code();
+
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
@@ -3451,6 +3529,15 @@ class SharedFunctionInfo: public HeapObject {
inline bool allows_lazy_compilation();
inline void set_allows_lazy_compilation(bool flag);
+ // Indicates how many full GCs this function has survived with an assigned
+ // code object. Used to determine when it is relatively safe to flush
+ // this code object and replace it with the lazy compilation stub.
+ // Age is reset when GC notices that the code object is referenced
+ // from the stack or compilation cache.
+ inline int code_age();
+ inline void set_code_age(int age);
+
+
// Check whether an inlined constructor can be generated with the given
// prototype.
bool CanGenerateInlineConstructor(Object* prototype);
@@ -3581,6 +3668,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kHasOnlySimpleThisPropertyAssignments = 0;
static const int kTryFullCodegen = 1;
static const int kAllowLazyCompilation = 2;
+ static const int kCodeAgeShift = 3;
+ static const int kCodeAgeMask = 7;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
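
kCodeAgeShift and kCodeAgeMask carve a three-bit age counter out of the
compiler-hints word. A self-contained version of the packing is below; it
clears the old bits first, whereas the accessor in the objects-inl.h hunk
above only ORs new bits in, so stale age bits can persist there until the
field is reset elsewhere:

    const int kCodeAgeShift = 3;
    const int kCodeAgeMask = 7;  // three bits: ages 0..7

    int code_age(int hints) {
      return (hints >> kCodeAgeShift) & kCodeAgeMask;
    }

    int with_code_age(int hints, int age) {
      hints &= ~(kCodeAgeMask << kCodeAgeShift);  // clear the old age bits
      return hints | ((age & kCodeAgeMask) << kCodeAgeShift);
    }
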
@@ -3596,6 +3685,8 @@ class JSFunction: public JSObject {
// can be shared by instances.
DECL_ACCESSORS(shared, SharedFunctionInfo)
+ inline SharedFunctionInfo* unchecked_shared();
+
// [context]: The context for this function.
inline Context* context();
inline Object* unchecked_context();
@@ -3608,6 +3699,8 @@ class JSFunction: public JSObject {
inline Code* code();
inline void set_code(Code* value);
+ inline Code* unchecked_code();
+
// Tells whether this function is builtin.
inline bool IsBuiltin();
@@ -3636,7 +3729,7 @@ class JSFunction: public JSObject {
inline Object* prototype();
inline Object* instance_prototype();
Object* SetInstancePrototype(Object* value);
- Object* SetPrototype(Object* value);
+ MUST_USE_RESULT Object* SetPrototype(Object* value);
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
@@ -3660,6 +3753,10 @@ class JSFunction: public JSObject {
// Casting.
static inline JSFunction* cast(Object* obj);
+ // Iterates over the object's fields, including the code object that is
+ // referenced indirectly through a pointer to its first instruction.
+ void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
+
// Dispatched behavior.
#ifdef DEBUG
void JSFunctionPrint();
@@ -3673,9 +3770,9 @@ class JSFunction: public JSObject {
static Context* GlobalContextFromLiterals(FixedArray* literals);
// Layout descriptors.
- static const int kCodeOffset = JSObject::kHeaderSize;
+ static const int kCodeEntryOffset = JSObject::kHeaderSize;
static const int kPrototypeOrInitialMapOffset =
- kCodeOffset + kPointerSize;
+ kCodeEntryOffset + kPointerSize;
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
@@ -3973,7 +4070,7 @@ class CompilationCacheShape {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -4006,7 +4103,7 @@ class CodeCache: public Struct {
DECL_ACCESSORS(normal_type_cache, Object)
// Add the code object to the cache.
- Object* Update(String* name, Code* code);
+ MUST_USE_RESULT Object* Update(String* name, Code* code);
// Lookup code object in the cache. Returns code object if found and undefined
// if not.
@@ -4034,8 +4131,8 @@ class CodeCache: public Struct {
static const int kSize = kNormalTypeCacheOffset + kPointerSize;
private:
- Object* UpdateDefaultCache(String* name, Code* code);
- Object* UpdateNormalTypeCache(String* name, Code* code);
+ MUST_USE_RESULT Object* UpdateDefaultCache(String* name, Code* code);
+ MUST_USE_RESULT Object* UpdateNormalTypeCache(String* name, Code* code);
Object* LookupDefaultCache(String* name, Code::Flags flags);
Object* LookupNormalTypeCache(String* name, Code::Flags flags);
@@ -4063,7 +4160,7 @@ class CodeCacheHashTableShape {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -4076,7 +4173,7 @@ class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
HashTableKey*> {
public:
Object* Lookup(String* name, Code::Flags flags);
- Object* Put(String* name, Code* code);
+ MUST_USE_RESULT Object* Put(String* name, Code* code);
int GetIndex(String* name, Code::Flags flags);
void RemoveByIndex(int index);
@@ -4123,6 +4220,11 @@ class StringHasher {
void invalidate() { is_valid_ = false; }
+ // Calculates the hash value for a string consisting of 1 to
+ // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
+ // value is the represented decimal value.
+ static uint32_t MakeArrayIndexHash(uint32_t value, int length);
+
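The declaration above is only a prototype; the packing it performs can be read off the String hash-field constants visible in the next hunk (kNofHashBitFields, kArrayIndexValueBits, kArrayIndexHashLengthShift). A minimal sketch of that packing, assuming this bit layout and omitting the range ASSERTs a real implementation would carry:

// Sketch only: mirrors the bit-field layout implied by
// kArrayIndexHashLengthShift == kArrayIndexValueBits + kNofHashBitFields.
static uint32_t MakeArrayIndexHashSketch(uint32_t value, int length) {
  uint32_t hash = value << String::kNofHashBitFields;   // value bits
  hash |= static_cast<uint32_t>(length)
          << String::kArrayIndexHashLengthShift;        // length bits
  return hash;  // the low hash-flag bits are left clear in this sketch
}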
private:
uint32_t array_index() {
@@ -4365,6 +4467,7 @@ class String: public HeapObject {
kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
STATIC_CHECK((kArrayIndexLengthBits > 0));
+ STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
static const int kArrayIndexHashLengthShift =
kArrayIndexValueBits + kNofHashBitFields;
@@ -4938,12 +5041,13 @@ class JSArray: public JSObject {
// is set to a smi. This matches the set function on FixedArray.
inline void set_length(Smi* length);
- Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* JSArrayUpdateLengthFromIndex(uint32_t index,
+ Object* value);
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- Object* Initialize(int capacity);
+ MUST_USE_RESULT Object* Initialize(int capacity);
// Set the content of the array to the content of storage.
inline void SetContent(FixedArray* storage);
@@ -5390,6 +5494,9 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a code target in the instruction stream.
virtual void VisitCodeTarget(RelocInfo* rinfo);
+ // Visits a code entry in a JS function.
+ virtual void VisitCodeEntry(Address entry_address);
+
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 1df7c21450..7667e89a3c 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -32,6 +32,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
+#include "func-name-inferrer.h"
#include "messages.h"
#include "parser.h"
#include "platform.h"
@@ -153,7 +154,7 @@ class Parser {
ParserLog* log_;
bool is_pre_parsing_;
ScriptDataImpl* pre_data_;
- bool seen_loop_stmt_; // Used for inner loop detection.
+ FuncNameInferrer* fni_;
bool inside_with() const { return with_nesting_level_ > 0; }
ParserFactory* factory() const { return factory_; }
@@ -213,6 +214,11 @@ class Parser {
ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
+ Expression* NewCompareNode(Token::Value op,
+ Expression* x,
+ Expression* y,
+ int position);
+
// Populate the constant properties fixed array for a materialized object
// literal.
void BuildObjectLiteralConstantProperties(
@@ -260,6 +266,8 @@ class Parser {
bool Check(Token::Value token);
void ExpectSemicolon(bool* ok);
+ Handle<String> GetSymbol(bool* ok);
+
// Get odd-ball literals.
Literal* GetLiteralUndefined();
Literal* GetLiteralTheHole();
@@ -338,9 +346,7 @@ class Parser {
template <typename T, int initial_size>
class BufferedZoneList {
public:
-
- BufferedZoneList() :
- list_(NULL), last_(NULL) {}
+ BufferedZoneList() : list_(NULL), last_(NULL) {}
// Adds element at end of list. This element is buffered and can
// be read using last() or removed using RemoveLast until a new Add or until
@@ -411,6 +417,7 @@ class BufferedZoneList {
T* last_;
};
+
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder: public ZoneObject {
public:
@@ -649,6 +656,7 @@ class RegExpParser {
static const int kMaxCaptures = 1 << 16;
static const uc32 kEndMarker = (1 << 21);
+
private:
enum SubexpressionType {
INITIAL,
@@ -744,6 +752,10 @@ class TemporaryScope BASE_EMBEDDED {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
+
+ void AddLoop() { loop_count_++; }
+ bool ContainsLoops() const { return loop_count_ > 0; }
+
private:
// Captures the number of literals that need materialization in the
// function. Includes regexp literals, and boilerplate for object
@@ -753,9 +765,14 @@ class TemporaryScope BASE_EMBEDDED {
// Properties count estimation.
int expected_property_count_;
+ // Keeps track of assignments to properties of this. Used for
+ // optimizing constructors.
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
+ // Captures the number of loops inside the scope.
+ int loop_count_;
+
// Bookkeeping
Parser* parser_;
TemporaryScope* parent_;
@@ -769,6 +786,7 @@ TemporaryScope::TemporaryScope(Parser* parser)
expected_property_count_(0),
only_simple_this_property_assignments_(false),
this_property_assignments_(Factory::empty_fixed_array()),
+ loop_count_(0),
parser_(parser),
parent_(parser->temp_scope_) {
parser->temp_scope_ = this;
@@ -812,7 +830,7 @@ class ParserFactory BASE_EMBEDDED {
virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
- virtual Handle<String> LookupSymbol(const char* string, int length) {
+ virtual Handle<String> LookupSymbol(int index, Vector<const char> string) {
return Handle<String>();
}
@@ -851,23 +869,48 @@ class ParserLog BASE_EMBEDDED {
public:
virtual ~ParserLog() { }
- // Records the occurrence of a function. The returned object is
- // only guaranteed to be valid until the next function has been
- // logged.
+ // Records the occurrence of a function.
virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); }
-
+ virtual void LogSymbol(int start, Vector<const char> symbol) {}
+ // Return the current position in the function entry log.
+ virtual int function_position() { return 0; }
+ virtual int symbol_position() { return 0; }
+ virtual int symbol_ids() { return 0; }
virtual void LogError() { }
};
class AstBuildingParserFactory : public ParserFactory {
public:
- AstBuildingParserFactory() : ParserFactory(false) { }
+ explicit AstBuildingParserFactory(int expected_symbols)
+ : ParserFactory(false), symbol_cache_(expected_symbols) { }
virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
- virtual Handle<String> LookupSymbol(const char* string, int length) {
- return Factory::LookupSymbol(Vector<const char>(string, length));
+ virtual Handle<String> LookupSymbol(int symbol_id,
+ Vector<const char> string) {
+ // If there is no preparse data, we have no simpler way to identify similar
+ // symbols.
+ if (symbol_id < 0) return Factory::LookupSymbol(string);
+ return LookupCachedSymbol(symbol_id, string);
+ }
+
+ Handle<String> LookupCachedSymbol(int symbol_id,
+ Vector<const char> string) {
+ // Make sure the cache is large enough to hold the symbol identifier.
+ if (symbol_cache_.length() <= symbol_id) {
+ // Increase length to index + 1.
+ symbol_cache_.AddBlock(Handle<String>::null(),
+ symbol_id + 1 - symbol_cache_.length());
+ }
+ Handle<String> result = symbol_cache_.at(symbol_id);
+ if (result.is_null()) {
+ result = Factory::LookupSymbol(string);
+ symbol_cache_.at(symbol_id) = result;
+ return result;
+ }
+ Counters::total_preparse_symbols_skipped.Increment();
+ return result;
}
virtual Handle<String> EmptySymbol() {
@@ -885,6 +928,8 @@ class AstBuildingParserFactory : public ParserFactory {
}
virtual Statement* EmptyStatement();
+ private:
+ List<Handle<String> > symbol_cache_;
};
@@ -892,80 +937,190 @@ class ParserRecorder: public ParserLog {
public:
ParserRecorder();
virtual FunctionEntry LogFunction(int start);
+ virtual void LogSymbol(int start, Vector<const char> literal) {
+ int hash = vector_hash(literal);
+ HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true);
+ int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ if (id == 0) {
+ // Put (symbol_id_ + 1) into entry and increment it.
+ symbol_id_++;
+ entry->value = reinterpret_cast<void*>(symbol_id_);
+ Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
+ entry->key = &symbol[0];
+ } else {
+ // Log a reuse of an earlier seen symbol.
+ symbol_store_.Add(start);
+ symbol_store_.Add(id - 1);
+ }
+ }
virtual void LogError() { }
virtual void LogMessage(Scanner::Location loc,
const char* message,
Vector<const char*> args);
- void WriteString(Vector<const char> str);
- static const char* ReadString(unsigned* start, int* chars);
- List<unsigned>* store() { return &store_; }
- private:
- bool has_error_;
- List<unsigned> store_;
-};
+ Vector<unsigned> ExtractData() {
+ int function_size = function_store_.size();
+ int symbol_size = symbol_store_.size();
+ int total_size = ScriptDataImpl::kHeaderSize + function_size + symbol_size;
+ Vector<unsigned> data = Vector<unsigned>::New(total_size);
+ preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size;
+ preamble_[ScriptDataImpl::kSymbolCountOffset] = symbol_id_;
+ memcpy(data.start(), preamble_, sizeof(preamble_));
+ int symbol_start = ScriptDataImpl::kHeaderSize + function_size;
+ if (function_size > 0) {
+ function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize,
+ symbol_start));
+ }
+ if (symbol_size > 0) {
+ symbol_store_.WriteTo(data.SubVector(symbol_start, total_size));
+ }
+ return data;
+ }
+ virtual int function_position() { return function_store_.size(); }
+ virtual int symbol_position() { return symbol_store_.size(); }
+ virtual int symbol_ids() { return symbol_id_; }
+ private:
+ Collector<unsigned> function_store_;
+ Collector<unsigned> symbol_store_;
+ Collector<Vector<const char> > symbol_entries_;
+ HashMap symbol_table_;
+ int symbol_id_;
+
+ static int vector_hash(Vector<const char> string) {
+ int hash = 0;
+ for (int i = 0; i < string.length(); i++) {
+ int c = string[i];
+ hash += c;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+ return hash;
+ }
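The per-character steps here (add, shift-add by 10, xor-shift by 6) are the mixing rounds of Bob Jenkins's one-at-a-time hash; the usual finalization rounds are omitted, which is harmless because the HashMap resolves collisions with the full vector_compare below. For reference, a complete one-at-a-time variant finishes with an avalanche step:

static int OneAtATimeHash(Vector<const char> string) {
  int hash = 0;
  for (int i = 0; i < string.length(); i++) {
    hash += string[i];
    hash += (hash << 10);
    hash ^= (hash >> 6);
  }
  hash += (hash << 3);   // finalization (avalanche); not used by the recorder
  hash ^= (hash >> 11);
  hash += (hash << 15);
  return hash;
}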
-FunctionEntry ScriptDataImpl::GetFunctionEnd(int start) {
- if (nth(last_entry_).start_pos() > start) {
- // If the last entry we looked up is higher than what we're
- // looking for then it's useless and we reset it.
- last_entry_ = 0;
+ static bool vector_compare(void* a, void* b) {
+ Vector<const char>* string1 = reinterpret_cast<Vector<const char>* >(a);
+ Vector<const char>* string2 = reinterpret_cast<Vector<const char>* >(b);
+ int length = string1->length();
+ if (string2->length() != length) return false;
+ return memcmp(string1->start(), string2->start(), length) == 0;
}
- for (int i = last_entry_; i < EntryCount(); i++) {
- FunctionEntry entry = nth(i);
- if (entry.start_pos() == start) {
- last_entry_ = i;
- return entry;
- }
+
+ unsigned preamble_[ScriptDataImpl::kHeaderSize];
+#ifdef DEBUG
+ int prev_start;
+#endif
+
+ bool has_error() {
+ return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
}
- return FunctionEntry();
+ void WriteString(Vector<const char> str);
+};
+
+
+void ScriptDataImpl::SkipFunctionEntry(int start) {
+ ASSERT(function_index_ + FunctionEntry::kSize <= store_.length());
+ ASSERT(static_cast<int>(store_[function_index_]) == start);
+ function_index_ += FunctionEntry::kSize;
}
-bool ScriptDataImpl::SanityCheck() {
- if (store_.length() < static_cast<int>(ScriptDataImpl::kHeaderSize))
- return false;
- if (magic() != ScriptDataImpl::kMagicNumber)
- return false;
- if (version() != ScriptDataImpl::kCurrentVersion)
- return false;
- return true;
+FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
+ // The current pre-data entry must be a FunctionEntry with the given
+ // start position.
+ if ((function_index_ + FunctionEntry::kSize <= store_.length())
+ && (static_cast<int>(store_[function_index_]) == start)) {
+ int index = function_index_;
+ function_index_ += FunctionEntry::kSize;
+ return FunctionEntry(store_.SubVector(index,
+ index + FunctionEntry::kSize));
+ }
+ return FunctionEntry();
}
-int ScriptDataImpl::EntryCount() {
- return (store_.length() - kHeaderSize) / FunctionEntry::kSize;
+int ScriptDataImpl::GetSymbolIdentifier(int start) {
+ int next = symbol_index_ + 2;
+ if (next <= store_.length()
+ && static_cast<int>(store_[symbol_index_]) == start) {
+ symbol_index_ = next;
+ return store_[next - 1];
+ }
+ return symbol_id_++;
}
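GetSymbolIdentifier consumes the (position, id) pairs that ParserRecorder::LogSymbol appends on reuse: a pair exists only when the preparser saw a symbol for the second or later time, so the absence of a pair at the current position means a fresh symbol, which gets the next sequential id. The encoding being decoded, as a sketch:

// symbol_store_ contents (unsigned ints), written by LogSymbol on reuse:
//
//   start_0, id_0,   // token at source position start_0 reuses symbol id_0
//   start_1, id_1,   // pairs appear in source order
//   ...
//
// GetSymbolIdentifier(start):
//   next pair's position == start  ->  consume the pair, return its id
//   otherwise                      ->  return symbol_id_++ (first occurrence)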
-FunctionEntry ScriptDataImpl::nth(int n) {
- int offset = kHeaderSize + n * FunctionEntry::kSize;
- return FunctionEntry(Vector<unsigned>(store_.start() + offset,
- FunctionEntry::kSize));
+
+bool ScriptDataImpl::SanityCheck() {
+  // Check that the header data is valid and doesn't point to
+  // positions outside the store.
+ if (store_.length() < ScriptDataImpl::kHeaderSize) return false;
+ if (magic() != ScriptDataImpl::kMagicNumber) return false;
+ if (version() != ScriptDataImpl::kCurrentVersion) return false;
+ if (has_error()) {
+    // Extra sanity check for the error message encoding.
+ if (store_.length() <= kHeaderSize + kMessageTextPos) return false;
+ if (Read(kMessageStartPos) > Read(kMessageEndPos)) return false;
+ unsigned arg_count = Read(kMessageArgCountPos);
+ int pos = kMessageTextPos;
+ for (unsigned int i = 0; i <= arg_count; i++) {
+ if (store_.length() <= kHeaderSize + pos) return false;
+ int length = static_cast<int>(Read(pos));
+ if (length < 0) return false;
+ pos += 1 + length;
+ }
+ if (store_.length() < kHeaderSize + pos) return false;
+ return true;
+ }
+ // Check that the space allocated for function entries is sane.
+ int functions_size =
+ static_cast<int>(store_[ScriptDataImpl::kFunctionsSizeOffset]);
+ if (functions_size < 0) return false;
+ if (functions_size % FunctionEntry::kSize != 0) return false;
+ // Check that the count of symbols is non-negative.
+ int symbol_count =
+ static_cast<int>(store_[ScriptDataImpl::kSymbolCountOffset]);
+ if (symbol_count < 0) return false;
+  // Check that the total size has room for the function entries.
+ int minimum_size =
+ ScriptDataImpl::kHeaderSize + functions_size;
+ if (store_.length() < minimum_size) return false;
+ return true;
}
ParserRecorder::ParserRecorder()
- : has_error_(false), store_(4) {
- Vector<unsigned> preamble = store()->AddBlock(0, ScriptDataImpl::kHeaderSize);
- preamble[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
- preamble[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
- preamble[ScriptDataImpl::kHasErrorOffset] = false;
+ : function_store_(0),
+ symbol_store_(0),
+ symbol_entries_(0),
+ symbol_table_(vector_compare),
+ symbol_id_(0) {
+#ifdef DEBUG
+ prev_start = -1;
+#endif
+ preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
+ preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
+ preamble_[ScriptDataImpl::kHasErrorOffset] = false;
+ preamble_[ScriptDataImpl::kFunctionsSizeOffset] = 0;
+ preamble_[ScriptDataImpl::kSymbolCountOffset] = 0;
+ preamble_[ScriptDataImpl::kSizeOffset] = 0;
+ ASSERT_EQ(6, ScriptDataImpl::kHeaderSize);
}
void ParserRecorder::WriteString(Vector<const char> str) {
- store()->Add(str.length());
- for (int i = 0; i < str.length(); i++)
- store()->Add(str[i]);
+ function_store_.Add(str.length());
+ for (int i = 0; i < str.length(); i++) {
+ function_store_.Add(str[i]);
+ }
}
-const char* ParserRecorder::ReadString(unsigned* start, int* chars) {
+const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
int length = start[0];
char* result = NewArray<char>(length + 1);
- for (int i = 0; i < length; i++)
+ for (int i = 0; i < length; i++) {
result[i] = start[i + 1];
+ }
result[length] = '\0';
if (chars != NULL) *chars = length;
return result;
@@ -974,38 +1129,44 @@ const char* ParserRecorder::ReadString(unsigned* start, int* chars) {
void ParserRecorder::LogMessage(Scanner::Location loc, const char* message,
Vector<const char*> args) {
- if (has_error_) return;
- store()->Rewind(ScriptDataImpl::kHeaderSize);
- store()->at(ScriptDataImpl::kHasErrorOffset) = true;
- store()->Add(loc.beg_pos);
- store()->Add(loc.end_pos);
- store()->Add(args.length());
+ if (has_error()) return;
+ preamble_[ScriptDataImpl::kHasErrorOffset] = true;
+ function_store_.Reset();
+ STATIC_ASSERT(ScriptDataImpl::kMessageStartPos == 0);
+ function_store_.Add(loc.beg_pos);
+ STATIC_ASSERT(ScriptDataImpl::kMessageEndPos == 1);
+ function_store_.Add(loc.end_pos);
+ STATIC_ASSERT(ScriptDataImpl::kMessageArgCountPos == 2);
+ function_store_.Add(args.length());
+ STATIC_ASSERT(ScriptDataImpl::kMessageTextPos == 3);
WriteString(CStrVector(message));
- for (int i = 0; i < args.length(); i++)
+ for (int i = 0; i < args.length(); i++) {
WriteString(CStrVector(args[i]));
+ }
}
Scanner::Location ScriptDataImpl::MessageLocation() {
- int beg_pos = Read(0);
- int end_pos = Read(1);
+ int beg_pos = Read(kMessageStartPos);
+ int end_pos = Read(kMessageEndPos);
return Scanner::Location(beg_pos, end_pos);
}
const char* ScriptDataImpl::BuildMessage() {
- unsigned* start = ReadAddress(3);
- return ParserRecorder::ReadString(start, NULL);
+ unsigned* start = ReadAddress(kMessageTextPos);
+ return ReadString(start, NULL);
}
Vector<const char*> ScriptDataImpl::BuildArgs() {
- int arg_count = Read(2);
+ int arg_count = Read(kMessageArgCountPos);
const char** array = NewArray<const char*>(arg_count);
- int pos = ScriptDataImpl::kHeaderSize + Read(3);
+  // Position after the message string, which starts at position kMessageTextPos.
+ int pos = kMessageTextPos + 1 + Read(kMessageTextPos);
for (int i = 0; i < arg_count; i++) {
int count = 0;
- array[i] = ParserRecorder::ReadString(ReadAddress(pos), &count);
+ array[i] = ReadString(ReadAddress(pos), &count);
pos += count + 1;
}
return Vector<const char*>(array, arg_count);
@@ -1023,8 +1184,12 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
FunctionEntry ParserRecorder::LogFunction(int start) {
- if (has_error_) return FunctionEntry();
- FunctionEntry result(store()->AddBlock(0, FunctionEntry::kSize));
+#ifdef DEBUG
+ ASSERT(start > prev_start);
+ prev_start = start;
+#endif
+ if (has_error()) return FunctionEntry();
+ FunctionEntry result(function_store_.AddBlock(FunctionEntry::kSize, 0));
result.set_start_pos(start);
return result;
}
@@ -1034,8 +1199,14 @@ class AstBuildingParser : public Parser {
public:
AstBuildingParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension, ScriptDataImpl* pre_data)
- : Parser(script, allow_natives_syntax, extension, PARSE,
- factory(), log(), pre_data) { }
+ : Parser(script,
+ allow_natives_syntax,
+ extension,
+ PARSE,
+ factory(),
+ log(),
+ pre_data),
+ factory_(pre_data ? pre_data->symbol_count() : 16) { }
virtual void ReportMessageAt(Scanner::Location loc, const char* message,
Vector<const char*> args);
virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
@@ -1214,7 +1385,7 @@ Parser::Parser(Handle<Script> script,
log_(log),
is_pre_parsing_(is_pre_parsing == PREPARSE),
pre_data_(pre_data),
- seen_loop_stmt_(false) {
+ fni_(NULL) {
}
@@ -1243,6 +1414,7 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
HistogramTimerScope timer(&Counters::parse);
Counters::total_parse_size.Increment(source->length());
+ fni_ = new FuncNameInferrer();
// Initialize parser state.
source->TryFlatten();
@@ -1278,7 +1450,8 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
0,
0,
source->length(),
- false));
+ false,
+ temp_scope.ContainsLoops()));
} else if (scanner().stack_overflow()) {
Top::StackOverflow();
}
@@ -1303,6 +1476,9 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
HistogramTimerScope timer(&Counters::parse_lazy);
Counters::total_parse_size.Increment(source->length());
+ fni_ = new FuncNameInferrer();
+ fni_->PushEnclosingName(name);
+
// Initialize parser state.
source->TryFlatten();
scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
@@ -1375,7 +1551,8 @@ FunctionLiteral* Parser::ParseJson(Handle<String> source) {
0,
0,
source->length(),
- false));
+ false,
+ temp_scope.ContainsLoops()));
} else if (scanner().stack_overflow()) {
Top::StackOverflow();
}
@@ -1396,6 +1573,21 @@ void Parser::ReportMessage(const char* type, Vector<const char*> args) {
}
+Handle<String> Parser::GetSymbol(bool* ok) {
+ if (pre_data() != NULL) {
+ int symbol_id =
+ pre_data()->GetSymbolIdentifier(scanner_.location().beg_pos);
+ if (symbol_id < 0) {
+ ReportInvalidPreparseData(Factory::empty_symbol(), ok);
+ return Handle<String>::null();
+ }
+ return factory()->LookupSymbol(symbol_id, scanner_.literal());
+ }
+ log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
+ return factory()->LookupSymbol(-1, scanner_.literal());
+}
+
+
void AstBuildingParser::ReportMessageAt(Scanner::Location source_location,
const char* type,
Vector<const char*> args) {
@@ -2080,9 +2272,12 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
VariableProxy* last_var = NULL; // the last variable declared
int nvars = 0; // the number of variables declared
do {
+ if (fni_ != NULL) fni_->Enter();
+
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
Handle<String> name = ParseIdentifier(CHECK_OK);
+ if (fni_ != NULL) fni_->PushVariableName(name);
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
@@ -2134,6 +2329,8 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
Expect(Token::ASSIGN, CHECK_OK);
position = scanner().location().beg_pos;
value = ParseAssignmentExpression(accept_IN, CHECK_OK);
+      // Don't infer if it is an "a = function(){...}();"-like expression.
+ if (fni_ != NULL && value->AsCall() == NULL) fni_->Infer();
}
// Make sure that 'const c' actually initializes 'c' to undefined
@@ -2210,6 +2407,8 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
Assignment* assignment = NEW(Assignment(op, last_var, value, position));
if (block) block->AddStatement(NEW(ExpressionStatement(assignment)));
}
+
+ if (fni_ != NULL) fni_->Leave();
} while (peek() == Token::COMMA);
if (!is_const && nvars == 1) {
@@ -2639,6 +2838,7 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
+ temp_scope_->AddLoop();
DoWhileStatement* loop = NEW(DoWhileStatement(labels));
Target target(this, loop);
@@ -2663,9 +2863,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
if (loop != NULL) loop->Initialize(cond, body);
-
- seen_loop_stmt_ = true;
-
return loop;
}
@@ -2674,6 +2871,7 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
+ temp_scope_->AddLoop();
WhileStatement* loop = NEW(WhileStatement(labels));
Target target(this, loop);
@@ -2685,9 +2883,6 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
if (loop != NULL) loop->Initialize(cond, body);
-
- seen_loop_stmt_ = true;
-
return loop;
}
@@ -2696,6 +2891,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+ temp_scope_->AddLoop();
Statement* init = NULL;
Expect(Token::FOR, CHECK_OK);
@@ -2721,9 +2917,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Block* result = NEW(Block(NULL, 2, false));
result->AddStatement(variable_statement);
result->AddStatement(loop);
-
- seen_loop_stmt_ = true;
-
// Parsed for-in loop w/ variable/const declaration.
return result;
}
@@ -2752,9 +2945,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
if (loop) loop->Initialize(expression, enumerable, body);
-
- seen_loop_stmt_ = true;
-
// Parsed for-in loop.
return loop;
@@ -2785,17 +2975,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- seen_loop_stmt_ = false;
-
Statement* body = ParseStatement(NULL, CHECK_OK);
-
- // Mark this loop if it is an inner loop.
- if (loop && !seen_loop_stmt_) loop->set_peel_this_loop(true);
-
if (loop) loop->Initialize(init, cond, next, body);
-
- seen_loop_stmt_ = true;
-
return loop;
}
@@ -2809,8 +2990,9 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
while (peek() == Token::COMMA) {
Expect(Token::COMMA, CHECK_OK);
+ int position = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = NEW(BinaryOperation(Token::COMMA, result, right));
+ result = NEW(BinaryOperation(Token::COMMA, result, right, position));
}
return result;
}
@@ -2822,9 +3004,11 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// ConditionalExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
+ if (fni_ != NULL) fni_->Enter();
Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
if (!Token::IsAssignmentOp(peek())) {
+ if (fni_ != NULL) fni_->Leave();
// Parsed conditional expression only (no assignment).
return expression;
}
@@ -2855,6 +3039,19 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
temp_scope_->AddProperty();
}
+ if (fni_ != NULL) {
+    // Check if the right-hand side is a call to avoid inferring a
+    // name if we're dealing with an "a = function(){...}();"-like
+    // expression.
+ if ((op == Token::INIT_VAR
+ || op == Token::INIT_CONST
+ || op == Token::ASSIGN)
+ && (right->AsCall() == NULL)) {
+ fni_->Infer();
+ }
+ fni_->Leave();
+ }
+
return NEW(Assignment(op, expression, right, pos));
}
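Taken together, the fni_ calls bracket each potential assignment: Enter() opens an inference scope, PushVariableName()/PushLiteralName() collect candidate names while the expression is parsed, Infer() names any anonymous function literal collected from a qualifying right-hand side, and Leave() pops the scope. Expected outcomes for a few source patterns, as a sketch (the dotted-name joining is an assumption about FuncNameInferrer):

// var f = function() {};       // op is INIT_VAR, RHS not a call: infers "f"
// obj.prop = function() {};    // "obj", "prop" collected: infers "obj.prop"
// var g = function() {}();     // RHS is a Call: inference is skipped
// a += function() {};          // op is not ASSIGN/INIT_*: no inference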
@@ -2898,6 +3095,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
Token::Value op = Next();
+ int position = scanner().location().beg_pos;
Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
// Compute some expressions involving only number literals.
@@ -2972,7 +3170,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// For now we distinguish between comparisons and other binary
// operations. (We could combine the two and get rid of this
- // code an AST node eventually.)
+ // code and AST node eventually.)
if (Token::IsCompareOp(op)) {
// We have a comparison.
Token::Value cmp = op;
@@ -2981,7 +3179,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = NEW(CompareOperation(cmp, x, y));
+ x = NewCompareNode(cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
x = NEW(UnaryOperation(Token::NOT, x));
@@ -2989,7 +3187,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
} else {
// We have a "normal" binary operation.
- x = NEW(BinaryOperation(op, x, y));
+ x = NEW(BinaryOperation(op, x, y, position));
}
}
}
@@ -2997,6 +3195,27 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
}
+Expression* Parser::NewCompareNode(Token::Value op,
+ Expression* x,
+ Expression* y,
+ int position) {
+ ASSERT(op != Token::NE && op != Token::NE_STRICT);
+ if (!is_pre_parsing_ && (op == Token::EQ || op == Token::EQ_STRICT)) {
+ bool is_strict = (op == Token::EQ_STRICT);
+ Literal* x_literal = x->AsLiteral();
+ if (x_literal != NULL && x_literal->IsNull()) {
+ return NEW(CompareToNull(is_strict, y));
+ }
+
+ Literal* y_literal = y->AsLiteral();
+ if (y_literal != NULL && y_literal->IsNull()) {
+ return NEW(CompareToNull(is_strict, x));
+ }
+ }
+ return NEW(CompareOperation(op, x, y, position));
+}
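NewCompareNode special-cases comparisons where one operand is the null literal, so later pipeline stages can emit a cheap null check (CompareToNull) instead of a generic comparison. The caller above has already canonicalized NE/NE_STRICT into EQ/EQ_STRICT plus an outer NOT, which is what the ASSERT relies on. Illustrative rewrites, as a sketch:

// x == null     ->  CompareToNull(is_strict = false, x)
// null === f()  ->  CompareToNull(is_strict = true,  f())
// x != null     ->  UnaryOperation(NOT, CompareToNull(false, x))  // NOT added by caller
// x == y        ->  CompareOperation(EQ, x, y, position)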
+
+
Expression* Parser::ParseUnaryExpression(bool* ok) {
// UnaryExpression ::
// PostfixExpression
@@ -3043,7 +3262,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
expression = NewThrowReferenceError(type);
}
- return NEW(CountOperation(true /* prefix */, op, expression));
+ int position = scanner().location().beg_pos;
+ IncrementOperation* increment = NEW(IncrementOperation(op, expression));
+ return NEW(CountOperation(true /* prefix */, increment, position));
} else {
return ParsePostfixExpression(ok);
@@ -3066,7 +3287,9 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
expression = NewThrowReferenceError(type);
}
Token::Value next = Next();
- expression = NEW(CountOperation(false /* postfix */, next, expression));
+ int position = scanner().location().beg_pos;
+ IncrementOperation* increment = NEW(IncrementOperation(next, expression));
+ expression = NEW(CountOperation(false /* postfix */, increment, position));
}
return expression;
}
@@ -3125,6 +3348,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+ if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3211,6 +3435,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+ if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
case Token::LPAREN: {
@@ -3321,6 +3546,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::IDENTIFIER: {
Handle<String> name = ParseIdentifier(CHECK_OK);
+ if (fni_ != NULL) fni_->PushVariableName(name);
if (is_pre_parsing_) {
result = VariableProxySentinel::identifier_proxy();
} else {
@@ -3332,17 +3558,16 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
- StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+ StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
result = NewNumberLiteral(value);
break;
}
case Token::STRING: {
Consume(Token::STRING);
- Handle<String> symbol =
- factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ Handle<String> symbol = GetSymbol(CHECK_OK);
result = NEW(Literal(symbol));
+ if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -3461,6 +3686,12 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
}
+  // Simple and shallow arrays can be lazily copied, so we transform the
+  // elements array into a copy-on-write array.
+ if (is_simple && depth == 1 && values.length() > 0) {
+ literals->set_map(Heap::fixed_cow_array_map());
+ }
+
return NEW(ArrayLiteral(literals, values.elements(),
literal_index, is_simple, depth));
}
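Marking the boilerplate's elements with the fixed copy-on-write map means every instantiation of a simple, shallow array literal can initially share a single backing store; elements are copied only when one of the arrays is first written to. A hedged illustration of the intended behavior:

// function make() { return [1, 2, 3]; }   // simple, depth-1 literal
// var a = make();
// var b = make();   // a and b share the boilerplate's COW elements
// a[0] = 9;         // first write copies a's elements before mutating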
@@ -3477,6 +3708,19 @@ bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
return lit != NULL && lit->is_simple();
}
+
+bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
+ Expression* value) {
+ // If value is a literal the property value is already set in the
+ // boilerplate object.
+ if (value->AsLiteral() != NULL) return false;
+ // If value is a materialized literal the property value is already set
+ // in the boilerplate object if it is simple.
+ if (CompileTimeValue::IsCompileTimeValue(value)) return false;
+ return true;
+}
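In other words, an element needs an explicit store at instantiation time only when its value cannot be baked into the boilerplate object. Classifying the elements of a hypothetical literal:

// [ 1, "x", {a: 1}, g() ]
//   1       no initialization (literal, already in the boilerplate)
//   "x"     no initialization (literal)
//   {a: 1}  no initialization (simple materialized literal, a compile-time value)
//   g()     needs initialization (value only known at run time)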
+
+
Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
ASSERT(IsCompileTimeValue(expression));
Handle<FixedArray> result = Factory::NewFixedArray(2, TENURED);
@@ -3589,9 +3833,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
Token::Value next = Next();
// TODO(820): Allow NUMBER and STRING as well (and handle array indices).
if (next == Token::IDENTIFIER || Token::IsKeyword(next)) {
- Handle<String> name =
- factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ Handle<String> name = GetSymbol(CHECK_OK);
FunctionLiteral* value =
ParseFunctionLiteral(name,
RelocInfo::kNoPosition,
@@ -3621,6 +3863,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
+ if (fni_ != NULL) fni_->Enter();
+
Literal* key = NULL;
Token::Value next = peek();
switch (next) {
@@ -3629,6 +3873,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
bool is_setter = false;
Handle<String> id =
ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ if (fni_ != NULL) fni_->PushLiteralName(id);
+
if ((is_getter || is_setter) && peek() != Token::COLON) {
ObjectLiteral::Property* property =
ParseObjectLiteralGetSet(is_getter, CHECK_OK);
@@ -3637,6 +3883,11 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
properties.Add(property);
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
continue; // restart the while
}
// Failed to parse as get/set property, so it's just a property
@@ -3646,9 +3897,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
case Token::STRING: {
Consume(Token::STRING);
- Handle<String> string =
- factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ Handle<String> string = GetSymbol(CHECK_OK);
+ if (fni_ != NULL) fni_->PushLiteralName(string);
uint32_t index;
if (!string.is_null() && string->AsArrayIndex(&index)) {
key = NewNumberLiteral(index);
@@ -3660,16 +3910,14 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
- StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+ StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
key = NewNumberLiteral(value);
break;
}
default:
if (Token::IsKeyword(next)) {
Consume(next);
- Handle<String> string =
- factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ Handle<String> string = GetSymbol(CHECK_OK);
key = NEW(Literal(string));
} else {
// Unexpected token.
@@ -3692,6 +3940,11 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// TODO(1240767): Consider allowing trailing comma.
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
}
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre parse bailout.
@@ -3776,10 +4029,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
-
- // Reset flag used for inner loop detection.
- seen_loop_stmt_ = false;
-
bool is_named = !var_name.is_null();
// The name associated with this function. If it's a function expression,
@@ -3843,43 +4092,61 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
bool is_lazily_compiled =
mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
+ int function_block_pos = scanner_.location().beg_pos;
int materialized_literal_count;
int expected_property_count;
+ int end_pos;
bool only_simple_this_property_assignments;
Handle<FixedArray> this_property_assignments;
if (is_lazily_compiled && pre_data() != NULL) {
- FunctionEntry entry = pre_data()->GetFunctionEnd(start_pos);
+ FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
if (!entry.is_valid()) {
ReportInvalidPreparseData(name, CHECK_OK);
}
- int end_pos = entry.end_pos();
- if (end_pos <= start_pos) {
+ end_pos = entry.end_pos();
+ if (end_pos <= function_block_pos) {
// End position greater than end of stream is safe, and hard to check.
ReportInvalidPreparseData(name, CHECK_OK);
}
- Counters::total_preparse_skipped.Increment(end_pos - start_pos);
+ Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
scanner_.SeekForward(end_pos);
+ pre_data()->Skip(entry.predata_function_skip(),
+ entry.predata_symbol_skip(),
+ entry.symbol_id_skip());
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
this_property_assignments = Factory::empty_fixed_array();
+ Expect(Token::RBRACE, CHECK_OK);
} else {
+ if (pre_data() != NULL) {
+ // Skip pre-data entry for non-lazily compiled function.
+ pre_data()->SkipFunctionEntry(function_block_pos);
+ }
+ FunctionEntry entry = log()->LogFunction(function_block_pos);
+ int predata_function_position_before = log()->function_position();
+ int predata_symbol_position_before = log()->symbol_position();
+ int symbol_ids_before = log()->symbol_ids();
ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
only_simple_this_property_assignments =
temp_scope.only_simple_this_property_assignments();
this_property_assignments = temp_scope.this_property_assignments();
- }
- Expect(Token::RBRACE, CHECK_OK);
- int end_pos = scanner_.location().end_pos;
-
- FunctionEntry entry = log()->LogFunction(start_pos);
- if (entry.is_valid()) {
- entry.set_end_pos(end_pos);
- entry.set_literal_count(materialized_literal_count);
- entry.set_property_count(expected_property_count);
+ Expect(Token::RBRACE, CHECK_OK);
+ end_pos = scanner_.location().end_pos;
+ if (entry.is_valid()) {
+ entry.set_end_pos(end_pos);
+ entry.set_literal_count(materialized_literal_count);
+ entry.set_property_count(expected_property_count);
+ entry.set_predata_function_skip(
+ log()->function_position() - predata_function_position_before);
+ entry.set_predata_symbol_skip(
+ log()->symbol_position() - predata_symbol_position_before);
+ entry.set_symbol_id_skip(
+ log()->symbol_ids() - symbol_ids_before);
+ }
}
FunctionLiteral* function_literal =
@@ -3893,16 +4160,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
num_parameters,
start_pos,
end_pos,
- function_name->length() > 0));
+ function_name->length() > 0,
+ temp_scope.ContainsLoops()));
if (!is_pre_parsing_) {
function_literal->set_function_token_position(function_token_position);
}
- // Set flag for inner loop detection. We treat loops that contain a function
- // literal not as inner loops because we avoid duplicating function literals
- // when peeling or unrolling such a loop.
- seen_loop_stmt_ = true;
-
+ if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
return function_literal;
}
}
@@ -3915,7 +4179,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
Expect(Token::MOD, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Runtime::Function* function =
- Runtime::FunctionForName(scanner_.literal_string());
+ Runtime::FunctionForName(scanner_.literal());
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
if (function == NULL && extension_ != NULL) {
// The extension structures are only accessible while parsing the
@@ -4031,8 +4295,7 @@ Literal* Parser::GetLiteralNumber(double value) {
Handle<String> Parser::ParseIdentifier(bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return Handle<String>();
- return factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ return GetSymbol(ok);
}
@@ -4043,8 +4306,7 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
*ok = false;
return Handle<String>();
}
- return factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ return GetSymbol(ok);
}
@@ -4062,8 +4324,7 @@ Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
*is_get = strcmp(token, "get") == 0;
*is_set = !*is_get && strcmp(token, "set") == 0;
}
- return factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ return GetSymbol(ok);
}
@@ -4165,7 +4426,11 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
for (int i = 0; i < argc; i++) {
Handle<Object> element = arguments[i];
if (!element.is_null()) {
- array->SetFastElement(i, *element);
+ Object* ok = array->SetFastElement(i, *element);
+ USE(ok); // Don't get an unused variable warning.
+      // We know this doesn't cause a GC here because we allocated the
+      // JSArray large enough up front.
+ ASSERT(!ok->IsFailure());
}
}
ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
@@ -4202,7 +4467,7 @@ Expression* Parser::ParseJsonValue(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
ASSERT(scanner_.literal_length() > 0);
- double value = StringToDouble(scanner_.literal_string(),
+ double value = StringToDouble(scanner_.literal(),
NO_FLAGS, // Hex, octal or trailing junk.
OS::nan_value());
return NewNumberLiteral(value);
@@ -4241,8 +4506,7 @@ Expression* Parser::ParseJsonObject(bool* ok) {
if (peek() != Token::RBRACE) {
do {
Expect(Token::STRING, CHECK_OK);
- Handle<String> key = factory()->LookupSymbol(scanner_.literal_string(),
- scanner_.literal_length());
+ Handle<String> key = GetSymbol(CHECK_OK);
Expect(Token::COLON, CHECK_OK);
Expression* value = ParseJsonValue(CHECK_OK);
Literal* key_literal;
@@ -5130,7 +5394,7 @@ ParserMessage::~ParserMessage() {
ScriptDataImpl::~ScriptDataImpl() {
- store_.Dispose();
+ if (owns_store_) store_.Dispose();
}
@@ -5159,10 +5423,9 @@ ScriptDataImpl* PreParse(Handle<String> source,
Bootstrapper::IsActive();
PreParser parser(no_script, allow_natives_syntax, extension);
if (!parser.PreParseProgram(source, stream)) return NULL;
- // The list owns the backing store so we need to clone the vector.
- // That way, the result will be exactly the right size rather than
- // the expected 50% too large.
- Vector<unsigned> store = parser.recorder()->store()->ToVector().Clone();
+ // Extract the accumulated data from the recorder as a single
+ // contiguous vector that we are responsible for disposing.
+ Vector<unsigned> store = parser.recorder()->ExtractData();
return new ScriptDataImpl(store);
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 89966a6fef..56412a05a5 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -68,11 +68,29 @@ class FunctionEntry BASE_EMBEDDED {
void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
int property_count() { return backing_[kPropertyCountOffset]; }
- void set_property_count(int value) { backing_[kPropertyCountOffset] = value; }
+ void set_property_count(int value) {
+ backing_[kPropertyCountOffset] = value;
+ }
+
+ int predata_function_skip() { return backing_[kPredataFunctionSkipOffset]; }
+ void set_predata_function_skip(int value) {
+ backing_[kPredataFunctionSkipOffset] = value;
+ }
+
+ int predata_symbol_skip() { return backing_[kPredataSymbolSkipOffset]; }
+ void set_predata_symbol_skip(int value) {
+ backing_[kPredataSymbolSkipOffset] = value;
+ }
+
+ int symbol_id_skip() { return backing_[kSymbolIdSkipOffset]; }
+ void set_symbol_id_skip(int value) {
+ backing_[kSymbolIdSkipOffset] = value;
+ }
+
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 4;
+ static const int kSize = 7;
private:
Vector<unsigned> backing_;
@@ -80,6 +98,9 @@ class FunctionEntry BASE_EMBEDDED {
static const int kEndPosOffset = 1;
static const int kLiteralCountOffset = 2;
static const int kPropertyCountOffset = 3;
+ static const int kPredataFunctionSkipOffset = 4;
+ static const int kPredataSymbolSkipOffset = 5;
+ static const int kSymbolIdSkipOffset = 6;
};
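With the three new skip fields, each function entry occupies seven consecutive unsigned ints in the preparse store. A sketch of the layout (the offset constant for slot 0 is not shown in this hunk and is assumed to hold the start position):

// backing_[0]  start position
// backing_[1]  kEndPosOffset               end position
// backing_[2]  kLiteralCountOffset         materialized literal count
// backing_[3]  kPropertyCountOffset        expected property count
// backing_[4]  kPredataFunctionSkipOffset  function-store words to skip
// backing_[5]  kPredataSymbolSkipOffset    symbol-store words to skip
// backing_[6]  kSymbolIdSkipOffset         symbol ids to skip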
@@ -87,43 +108,98 @@ class ScriptDataImpl : public ScriptData {
public:
explicit ScriptDataImpl(Vector<unsigned> store)
: store_(store),
- last_entry_(0) { }
+ function_index_(kHeaderSize),
+ symbol_id_(0),
+ owns_store_(true) {
+ Initialize();
+ }
+
+ void Initialize() {
+ if (store_.length() >= kHeaderSize) {
+ // Otherwise we won't satisfy the SanityCheck.
+ symbol_index_ = kHeaderSize + store_[kFunctionsSizeOffset];
+ }
+ }
+
+ // Create an empty ScriptDataImpl that is guaranteed to not satisfy
+ // a SanityCheck.
+ ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
+
virtual ~ScriptDataImpl();
virtual int Length();
virtual const char* Data();
virtual bool HasError();
- FunctionEntry GetFunctionEnd(int start);
+
+ FunctionEntry GetFunctionEntry(int start);
+ int GetSymbolIdentifier(int start);
+ void SkipFunctionEntry(int start);
bool SanityCheck();
Scanner::Location MessageLocation();
const char* BuildMessage();
Vector<const char*> BuildArgs();
+ int symbol_count() {
+ return (store_.length() > kHeaderSize) ? store_[kSymbolCountOffset] : 0;
+ }
+ // The following functions should only be called if SanityCheck has
+ // returned true.
bool has_error() { return store_[kHasErrorOffset]; }
unsigned magic() { return store_[kMagicOffset]; }
unsigned version() { return store_[kVersionOffset]; }
+ // Skip forward in the preparser data by the given number
+ // of unsigned ints.
+ virtual void Skip(int function_entries, int symbol_entries, int symbol_ids) {
+ ASSERT(function_entries >= 0);
+ ASSERT(function_entries
+ <= (static_cast<int>(store_[kFunctionsSizeOffset])
+ - (function_index_ - kHeaderSize)));
+ function_index_ += function_entries;
+ symbol_index_ += symbol_entries;
+ symbol_id_ += symbol_ids;
+ }
+
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 1;
+ static const unsigned kCurrentVersion = 2;
- static const unsigned kMagicOffset = 0;
- static const unsigned kVersionOffset = 1;
- static const unsigned kHasErrorOffset = 2;
- static const unsigned kSizeOffset = 3;
- static const unsigned kHeaderSize = 4;
+ static const int kMagicOffset = 0;
+ static const int kVersionOffset = 1;
+ static const int kHasErrorOffset = 2;
+ static const int kFunctionsSizeOffset = 3;
+ static const int kSymbolCountOffset = 4;
+ static const int kSizeOffset = 5;
+ static const int kHeaderSize = 6;
+
+ static const int kMessageStartPos = 0;
+ static const int kMessageEndPos = 1;
+ static const int kMessageArgCountPos = 2;
+ static const int kMessageTextPos = 3;
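These offsets, together with ParserRecorder::ExtractData in parser.cc, fix the overall layout of the version-2 preparse data. A sketch (kSizeOffset is written as 0 by the recorder shown in this patch):

// store_[0]  kMagicOffset          0xBadDead
// store_[1]  kVersionOffset        2
// store_[2]  kHasErrorOffset       0 or 1
// store_[3]  kFunctionsSizeOffset  words of function entries that follow
// store_[4]  kSymbolCountOffset    number of distinct symbols
// store_[5]  kSizeOffset           left 0 in this patch
// store_[kHeaderSize .. kHeaderSize + functions_size)  FunctionEntry records
// store_[kHeaderSize + functions_size .. )             (position, id) pairs
//
// When kHasErrorOffset is set, the area after the header instead holds the
// message location/argument-count slots followed by the encoded strings
// (see LogMessage and BuildMessage).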
private:
+ Vector<unsigned> store_;
+ int function_index_;
+ int symbol_index_;
+ int symbol_id_;
+ bool owns_store_;
+
unsigned Read(int position);
unsigned* ReadAddress(int position);
- int EntryCount();
- FunctionEntry nth(int n);
- Vector<unsigned> store_;
+ ScriptDataImpl(const char* backing_store, int length)
+ : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
+ length / sizeof(unsigned)),
+ function_index_(kHeaderSize),
+ symbol_id_(0),
+ owns_store_(false) {
+ ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
+ Initialize();
+ }
+
+ // Read strings written by ParserRecorder::WriteString.
+ static const char* ReadString(unsigned* start, int* chars);
- // The last entry returned. This is used to make lookup faster:
- // the next entry to return is typically the next entry so lookup
- // will usually be much faster if we start from the last entry.
- int last_entry_;
+ friend class ScriptData;
};
@@ -175,6 +251,8 @@ class CompileTimeValue: public AllStatic {
static bool IsCompileTimeValue(Expression* expression);
+ static bool ArrayLiteralElementNeedsInitialization(Expression* value);
+
// Get the value as a compile time value.
static Handle<FixedArray> GetValue(Expression* expression);
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
deleted file mode 100644
index 34410e8d0d..0000000000
--- a/deps/v8/src/platform-cygwin.cc
+++ /dev/null
@@ -1,858 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Cygwin goes here. For the POSIX compatible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-// Ubuntu Dapper requires memory pages to be marked as
-// executable. Otherwise, OS raises an exception when executing code
-// in that page.
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <fcntl.h> // open
-#include <unistd.h> // sysconf
-#ifdef __GLIBC__
-#include <execinfo.h> // backtrace, backtrace_symbols
-#endif // def __GLIBC__
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "top.h"
-#include "v8threads.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on Linux since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
- // Here gcc is telling us that we are on an ARM and gcc is assuming that we
- // have VFP3 instructions. If gcc can assume it then so can we.
- return 1u << VFP3;
-#elif CAN_USE_ARMV7_INSTRUCTIONS
- return 1u << ARMv7;
-#else
- return 0; // Linux runs on anything.
-#endif
-}
-
-
-#ifdef __arm__
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- const char* file_name = "/proc/cpuinfo";
- // Simple detection of VFP at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to ARM (mid 2009), no similar
- // facility is universally available on the ARM architectures,
- // so it's up to individual OSes to provide such.
- //
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
- switch (feature) {
- case VFP3:
- search_string = "vfp";
- break;
- case ARMv7:
- search_string = "ARMv7";
- break;
- default:
- UNREACHABLE();
- }
-
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-#endif // def __arm__
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#elif V8_TARGET_ARCH_MIPS
- return 8;
-#endif
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
-  return tzname[0];  // The location of the timezone string on Cygwin.
-}
-
-
-double OS::LocalTimeOffset() {
- //
- // On Cygwin, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- return static_cast<double>((mktime(loc) - utc) * msPerSecond);
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__)) && \
- defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-#elif defined(__mips__)
- asm("break");
-#else
- asm("int $3");
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- LOG(SharedLibraryEvent(lib_name, start, end));
- } else {
-      // Entry not describing executable data. Skip to the end of the line to
-      // set up reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
-#endif
-}
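
The parser above streams fscanf() calls over the open file. An equivalent, slightly simpler line-at-a-time sketch of scanning /proc/self/maps for executable mappings (Linux-style /proc assumed, so it finds nothing on systems without it):

    #include <cstdio>

    int main() {
      FILE* fp = fopen("/proc/self/maps", "r");
      if (fp == NULL) return 1;
      char line[1024];
      while (fgets(line, sizeof(line), fp) != NULL) {
        unsigned long start = 0, end = 0;
        char perms[5] = {0};
        // Each line starts with: start-end perms (e.g. "08048000-08056000 r-xp").
        if (sscanf(line, "%lx-%lx %4s", &start, &end, perms) != 3) continue;
        if (perms[0] == 'r' && perms[1] != 'w' && perms[2] == 'x') {
          printf("code: %08lx-%08lx %s\n", start, end, perms);
        }
      }
      fclose(fp);
      return 0;
    }
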
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#ifdef __GLIBC__
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-#else // ndef __GLIBC__
- return 0;
-#endif // ndef __GLIBC__
-}
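
backtrace() and backtrace_symbols() are glibc extensions, which is why the code above is fenced with __GLIBC__. A self-contained sketch of the same walk (link with -rdynamic to get symbol names instead of raw addresses):

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      void* addresses[64];
      int count = backtrace(addresses, 64);
      char** symbols = backtrace_symbols(addresses, count);
      if (symbols == NULL) return 1;
      for (int i = 0; i < count; i++) {
        printf("%2d: %s\n", i, symbols[i]);
      }
      free(symbols);  // a single free() releases the whole symbol array
      return 0;
    }
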
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
-#ifdef HAS_MAP_FIXED
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-#else
- if (mprotect(address, size, prot) != 0) {
- return false;
- }
-#endif
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
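
The reserve/commit split above is the classic two-step: grab address space with PROT_NONE and MAP_NORESERVE, then grant access a region at a time. A sketch of the mprotect()-based commit fallback this Cygwin port uses in place of MAP_FIXED:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>

    int main() {
      const size_t size = 1 << 20;  // reserve 1 MB of address space
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) return 1;
      // Commit the first page by granting access, as Commit() does above.
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      if (mprotect(base, page, PROT_READ | PROT_WRITE) == 0) {
        static_cast<char*>(base)[0] = 42;  // the committed page is now usable
      }
      munmap(base, size);
      return 0;
    }
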
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
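
ThreadEntry() is the usual pthread trampoline: pthread_create() only accepts a plain function pointer, so the object pointer rides through the void* argument and is cast back on the new thread. A reduced sketch of the same shape; the Runner class and its names are illustrative:

    #include <pthread.h>
    #include <cstdio>

    class Runner {
     public:
      void Start() { pthread_create(&thread_, NULL, &Runner::Entry, this); }
      void Join() { pthread_join(thread_, NULL); }
      virtual void Run() { printf("running on the worker thread\n"); }
      virtual ~Runner() {}

     private:
      // Static trampoline: recovers `this` from the void* argument.
      static void* Entry(void* arg) {
        static_cast<Runner*>(arg)->Run();
        return NULL;
      }
      pthread_t thread_;
    };

    int main() {
      Runner runner;
      runner.Start();
      runner.Join();
      return 0;
    }
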
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
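
The thread-local storage wrappers above map directly onto pthread keys; each thread sees its own slot for the same key. A minimal demonstration, assuming nothing beyond pthreads:

    #include <pthread.h>
    #include <stdint.h>
    #include <cstdio>

    static pthread_key_t key;

    static void* worker(void*) {
      pthread_setspecific(key, reinterpret_cast<void*>(intptr_t(2)));
      printf("worker slot: %p\n", pthread_getspecific(key));
      return NULL;
    }

    int main() {
      pthread_key_create(&key, NULL);
      pthread_setspecific(key, reinterpret_cast<void*>(intptr_t(1)));
      pthread_t thread;
      pthread_create(&thread, NULL, worker, NULL);
      pthread_join(thread, NULL);
      // The worker's write did not touch this thread's slot: still 0x1.
      printf("main slot:   %p\n", pthread_getspecific(key));
      pthread_key_delete(key);
      return 0;
    }
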
-
-
-class CygwinMutex : public Mutex {
- public:
-
- CygwinMutex() {
- pthread_mutexattr_t attrs;
- memset(&attrs, 0, sizeof(attrs));
-
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new CygwinMutex();
-}
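
CygwinMutex asks for PTHREAD_MUTEX_RECURSIVE because the Mutex contract in platform.h allows nested locking. The attribute dance in isolation:

    #include <pthread.h>
    #include <cstdio>

    int main() {
      pthread_mutexattr_t attrs;
      pthread_mutexattr_init(&attrs);
      pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_t mutex;
      pthread_mutex_init(&mutex, &attrs);
      pthread_mutexattr_destroy(&attrs);

      pthread_mutex_lock(&mutex);
      pthread_mutex_lock(&mutex);    // same thread may re-lock a recursive mutex
      printf("nested lock acquired\n");
      pthread_mutex_unlock(&mutex);  // must unlock once per successful lock
      pthread_mutex_unlock(&mutex);
      pthread_mutex_destroy(&mutex);
      return 0;
    }
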
-
-
-class CygwinSemaphore : public Semaphore {
- public:
- explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void CygwinSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool CygwinSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
-  // Split timeout into second and microsecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result > 0) {
- // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
- errno = result;
- result = -1;
- }
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
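
The timed wait converts a relative microsecond timeout into the absolute deadline that sem_timedwait() expects, and retries on EINTR. A compact sketch of that conversion (sem_timedwait is POSIX but absent on e.g. Mac OS X):

    #include <semaphore.h>
    #include <sys/time.h>
    #include <cerrno>
    #include <cstdio>

    // Returns true if the semaphore was acquired within timeout_us microseconds.
    static bool TimedWait(sem_t* sem, long timeout_us) {
      struct timeval now;
      if (gettimeofday(&now, NULL) == -1) return false;
      struct timespec deadline;
      long nsec = (now.tv_usec + timeout_us % 1000000) * 1000;
      deadline.tv_sec = now.tv_sec + timeout_us / 1000000 + nsec / 1000000000;
      deadline.tv_nsec = nsec % 1000000000;
      while (true) {
        if (sem_timedwait(sem, &deadline) == 0) return true;  // acquired
        if (errno == ETIMEDOUT) return false;                 // deadline passed
        if (errno != EINTR) return false;                     // real error
        // EINTR: a signal interrupted the wait; retry with the same deadline.
      }
    }

    int main() {
      sem_t sem;
      sem_init(&sem, 0, 0);
      printf("acquired: %d\n", TimedWait(&sem, 100000) ? 1 : 0);  // prints 0
      sem_destroy(&sem);
      return 0;
    }
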
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new CygwinSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-static pthread_t vm_thread_ = 0;
-
-
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
-// Android runs a fairly new Linux kernel, so signal info is there,
-// but the C library doesn't have the structs defined.
-
-struct sigcontext {
- uint32_t trap_no;
- uint32_t error_code;
- uint32_t oldmask;
- uint32_t gregs[16];
- uint32_t arm_cpsr;
- uint32_t fault_address;
-};
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
-} ucontext_t;
-enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-
-#endif
-
-
-// A function that determines if a signal handler is called in the context
-// of a VM thread.
-//
-// The problem is that the SIGPROF signal can be delivered to an arbitrary thread
-// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
-// So, if the signal is being handled in the context of a non-VM thread,
-// it means that the VM thread is running, and trying to sample its stack can
-// cause a crash.
-static inline bool IsVmThread() {
- // In the case of a single VM thread, this check is enough.
- if (pthread_equal(pthread_self(), vm_thread_)) return true;
- // If there are multiple threads that use VM, they must have a thread id
- // stored in TLS. To verify that the thread is really executing VM,
-// we check Top's data. Given that ThreadManager::RestoreThread first
-// restores ThreadLocalTop from TLS and only then erases the TLS value,
-// reading Top::thread_id() should not be affected by races.
- if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
- ThreadManager::CurrentId() == Top::thread_id()) {
- return true;
- }
- return false;
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-#ifndef V8_HOST_ARCH_MIPS
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // We always sample the VM state.
- sample->state = VMState::current_state();
-
-#if 0
- // If profiling, we extract the current pc and sp.
- if (active_sampler_->IsProfiling()) {
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
-#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif
-#elif V8_HOST_ARCH_MIPS
- // Implement this on MIPS.
- UNIMPLEMENTED();
-#endif
- if (IsVmThread()) {
- active_sampler_->SampleStack(sample);
- }
- }
-#endif
-
- active_sampler_->Tick(sample);
-#endif
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- signal_handler_installed_ = false;
- }
-
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
-};
-
-
-Sampler::Sampler(int interval, bool profiling)
- : interval_(interval), profiling_(profiling), active_(false) {
- data_ = new PlatformData();
-}
-
-
-Sampler::~Sampler() {
- delete data_;
-}
-
-
-void Sampler::Start() {
-  // There can only be one active sampler at a time on POSIX
-  // platforms.
- if (active_sampler_ != NULL) return;
-
- vm_thread_ = pthread_self();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
- active_ = true;
-}
-
-
-void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
- active_ = false;
-}
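
Start() and Stop() above are the whole profiling loop: install a SIGPROF handler, arm ITIMER_PROF (which ticks on consumed CPU time), and undo both on shutdown. A self-contained sketch of the mechanism, keeping only a counter in the handler since little else is async-signal-safe:

    #include <signal.h>
    #include <sys/time.h>
    #include <cstdio>

    static volatile sig_atomic_t ticks = 0;

    static void OnProf(int) { ticks++; }  // keep the handler trivially safe

    int main() {
      struct sigaction sa;
      sa.sa_handler = OnProf;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = 0;
      sigaction(SIGPROF, &sa, NULL);

      struct itimerval timer;
      timer.it_interval.tv_sec = 0;
      timer.it_interval.tv_usec = 1000;  // 1 ms between ticks
      timer.it_value = timer.it_interval;
      setitimer(ITIMER_PROF, &timer, NULL);

      // Burn CPU so ITIMER_PROF, which counts CPU time, actually fires.
      for (volatile long i = 0; i < 200000000L; i++) {}

      timer.it_value.tv_sec = 0;
      timer.it_value.tv_usec = 0;
      timer.it_interval = timer.it_value;
      setitimer(ITIMER_PROF, &timer, NULL);  // disarm
      printf("got %d profiling ticks\n", static_cast<int>(ticks));
      return 0;
    }
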
-
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 9b8b20675f..ae44944f2b 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -198,9 +198,10 @@ void OS::Abort() {
void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__)) && \
- defined(CAN_USE_ARMV5_INSTRUCTIONS)
+#if (defined(__arm__) || defined(__thumb__))
+# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
+# endif
#else
asm("int $3");
#endif
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 58ff15408d..05ed9eeee1 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -196,9 +196,10 @@ void OS::Abort() {
void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__)) && \
- defined(CAN_USE_ARMV5_INSTRUCTIONS)
+#if (defined(__arm__) || defined(__thumb__))
+# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
+# endif
#else
asm("int $3");
#endif
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index b75867cfc9..e9e7c22367 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -302,7 +302,7 @@ class VirtualMemory {
void* address() {
ASSERT(IsReserved());
return address_;
- };
+ }
// Returns the size of the reserved memory.
size_t size() { return size_; }
@@ -363,11 +363,13 @@ class ThreadHandle {
class Thread: public ThreadHandle {
public:
// Opaque data type for thread-local storage keys.
-#ifndef __CYGWIN__
- enum LocalStorageKey {};
-#else
- typedef void *LocalStorageKey;
-#endif
+  // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
+  // to ensure that the enumeration type has a correct value range (see
+  // Issue 830 for more details).
+ enum LocalStorageKey {
+ LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
+ LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
+ };
// Create new thread.
Thread();
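
The reason for the sentinel enumerators in this hunk: an enum's value range only has to cover its enumerators, so casting an arbitrary pthread key into an empty enum is not portable. A small illustration of the difference; this is an assumption-level sketch, not V8 code:

    #include <climits>

    enum NarrowKey { kOnlyZero = 0 };                 // range need only hold 0
    enum WideKey { kMin = INT_MIN, kMax = INT_MAX };  // range covers every int

    int main() {
      int raw_key = 12345;  // e.g. a value obtained from pthread_key_create()
      WideKey ok = static_cast<WideKey>(raw_key);         // always well-defined
      NarrowKey risky = static_cast<NarrowKey>(raw_key);  // out of the enum's
                                                          // range: unspecified
      (void)risky;
      return static_cast<int>(ok) == 12345 ? 0 : 1;
    }
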
diff --git a/deps/v8/src/platform.h.orig b/deps/v8/src/platform.h.orig
deleted file mode 100644
index 7539fd2ddb..0000000000
--- a/deps/v8/src/platform.h.orig
+++ /dev/null
@@ -1,580 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module contains the platform-specific code. This makes the rest of
-// the code less dependent on the operating system, compilers, and runtime
-// libraries. This module specifically does not deal with differences between
-// processor architectures.
-// The platform classes have the same definition for all platforms. The
-// implementation for a particular platform is put in platform_<os>.cc.
-// The build system then uses the implementation for the target platform.
-//
-// This design has been chosen because it is simple and fast. Alternatively,
-// the platform dependent classes could have been implemented using abstract
-// superclasses with virtual methods and having specializations for each
-// platform. This design was rejected because it was more complicated and
-// slower. It would require factory methods for selecting the right
-// implementation and the overhead of virtual methods for
-// performance-sensitive operations like mutex locking/unlocking.
-
-#ifndef V8_PLATFORM_H_
-#define V8_PLATFORM_H_
-
-#define V8_INFINITY INFINITY
-
-// Windows specific stuff.
-#ifdef WIN32
-
-// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
-
-enum {
- FP_NAN,
- FP_INFINITE,
- FP_ZERO,
- FP_SUBNORMAL,
- FP_NORMAL
-};
-
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
-
-namespace v8 {
-namespace internal {
-int isfinite(double x);
-} }
-int isnan(double x);
-int isinf(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-int strncasecmp(const char* s1, const char* s2, int n);
-
-#endif // _MSC_VER
-
-// Random is missing on both Visual Studio and MinGW.
-int random();
-
-#endif // WIN32
-
-
-#ifdef __sun
-# ifndef signbit
-int signbit(double x);
-# endif
-#endif
-
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-// Needed for va_list on at least MinGW and Android.
-#include <stdarg.h>
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
-// warning flag and certain versions of GCC due to a bug:
-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
-// For now, we use the more involved template-based version from <limits>, but
-// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-
-#endif // __GNUC__
-
-namespace v8 {
-namespace internal {
-
-// Use AtomicWord for a machine-sized pointer. It is assumed that
-// reads and writes of naturally aligned values of this type are atomic.
-typedef intptr_t AtomicWord;
-
-class Semaphore;
-
-double ceiling(double x);
-double modulo(double x, double y);
-
-// Forward declarations.
-class Socket;
-
-// ----------------------------------------------------------------------------
-// OS
-//
-// This class has static methods for the different platform specific
-// functions. Add methods here to cope with differences between the
-// supported platforms.
-
-class OS {
- public:
- // Initializes the platform OS support. Called once at VM startup.
- static void Setup();
-
-  // Returns the accumulated user time for the thread. This routine
-  // can be used for profiling. The implementation should
-  // strive for high-precision timer resolution, preferably
-  // microsecond resolution.
- static int GetUserTime(uint32_t* secs, uint32_t* usecs);
-
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
- // Returns current time as the number of milliseconds since
- // 00:00:00 UTC, January 1, 1970.
- static double TimeCurrentMillis();
-
- // Returns a string identifying the current time zone. The
- // timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time);
-
- // Returns the local time offset in milliseconds east of UTC without
- // taking daylight savings time into account.
- static double LocalTimeOffset();
-
- // Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time);
-
- // Returns last OS error.
- static int GetLastError();
-
- static FILE* FOpen(const char* path, const char* mode);
-
-  // Log file open mode is platform-dependent due to line-ending issues.
- static const char* LogFileOpenMode;
-
- // Print output to console. This is mostly used for debugging output.
-  // On platforms that have standard terminal output, the output
- // should go to stdout.
- static void Print(const char* format, ...);
- static void VPrint(const char* format, va_list args);
-
- // Print error output to console. This is mostly used for error message
-  // output. On platforms that have standard terminal output, the output
- // should go to stderr.
- static void PrintError(const char* format, ...);
- static void VPrintError(const char* format, va_list args);
-
- // Allocate/Free memory used by JS heap. Pages are readable/writable, but
- // they are not guaranteed to be executable unless 'executable' is true.
-  // Returns the address of allocated memory, or NULL on failure.
- static void* Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable);
- static void Free(void* address, const size_t size);
-  // Get the alignment guaranteed by Allocate().
- static size_t AllocateAlignment();
-
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect a block of memory by marking it read-only/writable.
- static void Protect(void* address, size_t size);
- static void Unprotect(void* address, size_t size, bool is_executable);
-#endif
-
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
- // Sleep for a number of milliseconds.
- static void Sleep(const int milliseconds);
-
- // Abort the current process.
- static void Abort();
-
- // Debug break.
- static void DebugBreak();
-
- // Walk the stack.
- static const int kStackWalkError = -1;
- static const int kStackWalkMaxNameLen = 256;
- static const int kStackWalkMaxTextLen = 256;
- struct StackFrame {
- void* address;
- char text[kStackWalkMaxTextLen];
- };
-
- static int StackWalk(Vector<StackFrame> frames);
-
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
- // Factory method for creating platform dependent Semaphore.
- // Please use delete to reclaim the storage for the returned Semaphore.
- static Semaphore* CreateSemaphore(int count);
-
- // Factory method for creating platform dependent Socket.
- // Please use delete to reclaim the storage for the returned Socket.
- static Socket* CreateSocket();
-
- class MemoryMappedFile {
- public:
- static MemoryMappedFile* create(const char* name, int size, void* initial);
- virtual ~MemoryMappedFile() { }
- virtual void* memory() = 0;
- };
-
- // Safe formatting print. Ensures that str is always null-terminated.
- // Returns the number of chars written, or -1 if output was truncated.
- static int SNPrintF(Vector<char> str, const char* format, ...);
- static int VSNPrintF(Vector<char> str,
- const char* format,
- va_list args);
-
- static char* StrChr(char* str, int c);
- static void StrNCpy(Vector<char> dest, const char* src, size_t n);
-
-  // Support for the profiler. Can do nothing, in which case ticks
-  // occurring in shared libraries will not be properly accounted
-  // for.
- static void LogSharedLibraryAddresses();
-
- // The return value indicates the CPU features we are sure of because of the
- // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
- // instructions.
-  // This is a little messy because the interpretation depends on the
-  // combination of CPU and OS. The bits in the answer correspond to the bit
- // positions indicated by the members of the CpuFeature enum from globals.h
- static uint64_t CpuFeaturesImpliedByPlatform();
-
- // Returns the double constant NAN
- static double nan_value();
-
- // Support runtime detection of VFP3 on ARM CPUs.
- static bool ArmCpuHasFeature(CpuFeature feature);
-
- // Returns the activation frame alignment constraint or zero if
- // the platform doesn't care. Guaranteed to be a power of two.
- static int ActivationFrameAlignment();
-
- static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
-
- private:
- static const int msPerSecond = 1000;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
-};
-
-
-class VirtualMemory {
- public:
- // Reserves virtual memory with size.
- explicit VirtualMemory(size_t size);
- ~VirtualMemory();
-
- // Returns whether the memory has been reserved.
- bool IsReserved();
-
- // Returns the start address of the reserved memory.
- void* address() {
- ASSERT(IsReserved());
- return address_;
- };
-
- // Returns the size of the reserved memory.
- size_t size() { return size_; }
-
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size, bool is_executable);
-
- // Uncommit real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size);
-
- private:
- void* address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
-
-
-// ----------------------------------------------------------------------------
-// ThreadHandle
-//
-// A ThreadHandle represents a thread identifier for a thread. The ThreadHandle
-// does not own the underlying OS handle. Thread handles can be used for
-// referring to threads and testing equality.
-
-class ThreadHandle {
- public:
- enum Kind { SELF, INVALID };
- explicit ThreadHandle(Kind kind);
-
- // Destructor.
- ~ThreadHandle();
-
- // Test for thread running.
- bool IsSelf() const;
-
- // Test for valid thread handle.
- bool IsValid() const;
-
- // Get platform-specific data.
- class PlatformData;
- PlatformData* thread_handle_data() { return data_; }
-
- // Initialize the handle to kind
- void Initialize(Kind kind);
-
- private:
- PlatformData* data_; // Captures platform dependent data.
-};
-
-
-// ----------------------------------------------------------------------------
-// Thread
-//
-// Thread objects are used for creating and running threads. When the start()
-// method is called the new thread starts running the run() method in the new
-// thread. The Thread object should not be deallocated before the thread has
-// terminated.
-
-class Thread: public ThreadHandle {
- public:
- // Opaque data type for thread-local storage keys.
- enum LocalStorageKey {};
-
- // Create new thread.
- Thread();
- virtual ~Thread();
-
- // Start new thread by calling the Run() method in the new thread.
- void Start();
-
- // Wait until thread terminates.
- void Join();
-
- // Abstract method for run handler.
- virtual void Run() = 0;
-
- // Thread-local storage.
- static LocalStorageKey CreateThreadLocalKey();
- static void DeleteThreadLocalKey(LocalStorageKey key);
- static void* GetThreadLocal(LocalStorageKey key);
- static int GetThreadLocalInt(LocalStorageKey key) {
- return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
- }
- static void SetThreadLocal(LocalStorageKey key, void* value);
- static void SetThreadLocalInt(LocalStorageKey key, int value) {
- SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
- }
- static bool HasThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key) != NULL;
- }
-
- // A hint to the scheduler to let another thread run.
- static void YieldCPU();
-
- private:
- class PlatformData;
- PlatformData* data_;
- DISALLOW_COPY_AND_ASSIGN(Thread);
-};
-
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
-  // Locks the given mutex. If the mutex is currently unlocked, it becomes
-  // locked and owned by the calling thread, and the call returns immediately.
-  // If the mutex is already locked by another thread, suspends the calling
-  // thread until the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
-// of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore block until the
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
-  // Suspends the calling thread until the semaphore counter is non-zero
-  // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
-  // Suspends the calling thread until the counter is non-zero or the timeout
-  // time has passed. If the timeout expires, the return value is false and
-  // the counter is unchanged. Otherwise the semaphore counter is decremented
-  // and true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket {
- public:
- virtual ~Socket() {}
-
- // Server initialization.
- virtual bool Bind(const int port) = 0;
- virtual bool Listen(int backlog) const = 0;
- virtual Socket* Accept() const = 0;
-
- // Client initialization.
- virtual bool Connect(const char* host, const char* port) = 0;
-
-  // Shuts down the socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After Shutdown the Socket object cannot be used for
- // any communication.
- virtual bool Shutdown() = 0;
-
-  // Data transmission.
- virtual int Send(const char* data, int len) const = 0;
- virtual int Receive(char* data, int len) const = 0;
-
- // Set the value of the SO_REUSEADDR socket option.
- virtual bool SetReuseAddress(bool reuse_address) = 0;
-
- virtual bool IsValid() const = 0;
-
- static bool Setup();
- static int LastError();
- static uint16_t HToN(uint16_t value);
- static uint16_t NToH(uint16_t value);
- static uint32_t HToN(uint32_t value);
- static uint32_t NToH(uint32_t value);
-};
-
-
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-class TickSample {
- public:
- TickSample()
- : state(OTHER),
- pc(NULL),
- sp(NULL),
- fp(NULL),
- function(NULL),
- frames_count(0) {}
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- Address function; // The last called JS function.
- static const int kMaxFramesCount = 64;
- Address stack[kMaxFramesCount]; // Call stack.
- int frames_count; // Number of captured frames.
-};
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-class Sampler {
- public:
- // Initialize sampler.
- explicit Sampler(int interval, bool profiling);
- virtual ~Sampler();
-
- // Performs stack sampling.
- virtual void SampleStack(TickSample* sample) = 0;
-
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
- // Is the sampler used for profiling.
- inline bool IsProfiling() { return profiling_; }
-
- // Whether the sampler is running (that is, consumes resources).
- inline bool IsActive() { return active_; }
-
- class PlatformData;
-
- private:
- const int interval_;
- const bool profiling_;
- bool active_;
- PlatformData* data_; // Platform specific data.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_H_
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 75f6fc3cbd..211f3f6360 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -376,6 +376,11 @@ void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
}
+void PrettyPrinter::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void PrettyPrinter::VisitCountOperation(CountOperation* node) {
Print("(");
if (node->is_prefix()) Print("%s", Token::String(node->op()));
@@ -403,6 +408,13 @@ void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
}
+void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
+ Print("(");
+ Visit(node->expression());
+ Print("%s null)", Token::String(node->op()));
+}
+
+
void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
Print("<this-function>");
}
@@ -604,11 +616,6 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->Print(StaticType::Type2String(expr->type()));
printed_first = true;
}
- if (expr->num() != AstNode::kNoNumber) {
- ast_printer_->Print(printed_first ? ", num = " : " (num = ");
- ast_printer_->Print("%d", expr->num());
- printed_first = true;
- }
if (printed_first) ast_printer_->Print(")");
}
ast_printer_->Print("\n");
@@ -667,9 +674,7 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type,
- int num,
- bool is_primitive) {
+ StaticType* type) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
@@ -680,11 +685,6 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
pos += OS::SNPrintF(buf + pos, ", type = %s",
StaticType::Type2String(type));
}
- if (num != AstNode::kNoNumber) {
- pos += OS::SNPrintF(buf + pos, ", num = %d", num);
- }
- pos += OS::SNPrintF(buf + pos,
- is_primitive ? ", primitive" : ", non-primitive");
OS::SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
@@ -742,9 +742,7 @@ void AstPrinter::PrintParameters(Scope* scope) {
for (int i = 0; i < scope->num_parameters(); i++) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
scope->parameter(i)->name(),
- scope->parameter(i)->type(),
- AstNode::kNoNumber,
- false);
+ scope->parameter(i)->type());
}
}
}
@@ -789,9 +787,7 @@ void AstPrinter::VisitDeclaration(Declaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->AsVariable(),
node->proxy()->name(),
- node->proxy()->AsVariable()->type(),
- AstNode::kNoNumber,
- node->proxy()->IsPrimitive());
+ node->proxy()->AsVariable()->type());
} else {
// function declarations
PrintIndented("FUNCTION ");
@@ -1027,7 +1023,7 @@ void AstPrinter::VisitSlot(Slot* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
- node->type(), node->num(), node->IsPrimitive());
+ node->type());
Variable* var = node->var();
if (var != NULL && var->rewrite() != NULL) {
IndentedScope indent;
@@ -1086,6 +1082,11 @@ void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
}
+void AstPrinter::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
if (node->type()->IsKnown()) {
@@ -1115,6 +1116,15 @@ void AstPrinter::VisitCompareOperation(CompareOperation* node) {
}
+void AstPrinter::VisitCompareToNull(CompareToNull* node) {
+ const char* name = node->is_strict()
+ ? "COMPARE-TO-NULL-STRICT"
+ : "COMPARE-TO-NULL";
+ IndentedScope indent(name, node);
+ Visit(node->expression());
+}
+
+
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent("THIS-FUNCTION");
}
@@ -1477,6 +1487,11 @@ void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
}
+void JsonAstBuilder::VisitIncrementOperation(IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
TagScope tag(this, "CountOperation");
{
@@ -1510,6 +1525,16 @@ void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
}
+void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
+ TagScope tag(this, "CompareToNull");
+ {
+ AttributesScope attributes(this);
+ AddAttribute("is_strict", expr->is_strict());
+ }
+ Visit(expr->expression());
+}
+
+
void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
TagScope tag(this, "ThisFunction");
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index 93ba0d95a8..dfff49a45a 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -102,9 +102,7 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type,
- int num,
- bool is_primitive);
+ StaticType* type);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 0c50581ab7..cef825da8a 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -35,6 +35,16 @@
namespace v8 {
namespace internal {
+const char* StringsStorage::GetFunctionName(String* name) {
+ return GetFunctionName(GetName(name));
+}
+
+
+const char* StringsStorage::GetFunctionName(const char* name) {
+ return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
+}
+
+
CodeEntry::CodeEntry(int security_token_id)
: call_uid_(0),
tag_(Logger::FUNCTION_TAG),
@@ -97,13 +107,21 @@ void CodeMap::DeleteCode(Address addr) {
}
-const char* CpuProfilesCollection::GetFunctionName(String* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* CpuProfilesCollection::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
+template<class Visitor>
+void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) {
+ EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
+ entry_info->entry = visitor->GetEntry(
+ reinterpret_cast<HeapObject*>(p->key),
+ entry_info->children_count,
+ entry_info->retainers_count);
+ entry_info->children_count = 0;
+ entry_info->retainers_count = 0;
+ }
+ }
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index cd46badf09..2de7a2fb91 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -492,6 +492,10 @@ CpuProfilesCollection::~CpuProfilesCollection() {
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
ASSERT(uid > 0);
current_profiles_semaphore_->Wait();
+ if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
+ current_profiles_semaphore_->Signal();
+ return false;
+ }
for (int i = 0; i < current_profiles_.length(); ++i) {
if (strcmp(current_profiles_[i]->title(), title) == 0) {
// Ignore attempts to start profile with the same title.
@@ -824,13 +828,6 @@ HeapEntry* HeapGraphEdge::From() {
void HeapEntry::Init(HeapSnapshot* snapshot,
- int children_count,
- int retainers_count) {
- Init(snapshot, kInternal, "", 0, 0, children_count, retainers_count);
-}
-
-
-void HeapEntry::Init(HeapSnapshot* snapshot,
Type type,
const char* name,
uint64_t id,
@@ -1210,9 +1207,11 @@ template <> struct SnapshotSizeConstants<8> {
} // namespace
HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+ HeapSnapshot::Type type,
const char* title,
unsigned uid)
: collection_(collection),
+ type_(type),
title_(title),
uid_(uid),
root_entry_index_(-1),
@@ -1243,6 +1242,10 @@ void HeapSnapshot::AllocateEntries(int entries_count,
ASSERT(raw_entries_ == NULL);
raw_entries_ = NewArray<char>(
HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
+#ifdef DEBUG
+ raw_entries_size_ =
+ HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
+#endif
}
@@ -1252,9 +1255,9 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
if (object == kInternalRootObject) {
ASSERT(root_entry_index_ == -1);
root_entry_index_ = entries_.length();
- HeapEntry* entry = GetNextEntryToInit();
- entry->Init(this, children_count, retainers_count);
- return entry;
+ ASSERT(retainers_count == 0);
+ return AddEntry(
+ HeapEntry::kInternal, "", 0, 0, children_count, retainers_count);
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
@@ -1262,7 +1265,7 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
String::cast(shared->name()) : shared->inferred_name();
return AddEntry(object,
HeapEntry::kClosure,
- collection_->GetName(name),
+ collection_->GetFunctionName(name),
children_count,
retainers_count);
} else if (object->IsJSObject()) {
@@ -1290,7 +1293,7 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
String::cast(shared->name()) : shared->inferred_name();
return AddEntry(object,
HeapEntry::kCode,
- collection_->GetName(name),
+ collection_->GetFunctionName(name),
children_count,
retainers_count);
} else if (object->IsScript()) {
@@ -1345,14 +1348,23 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
const char* name,
int children_count,
int retainers_count) {
+ return AddEntry(type,
+ name,
+ collection_->GetObjectId(object->address()),
+ GetObjectSize(object),
+ children_count,
+ retainers_count);
+}
+
+
+HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
+ const char* name,
+ uint64_t id,
+ int size,
+ int children_count,
+ int retainers_count) {
HeapEntry* entry = GetNextEntryToInit();
- entry->Init(this,
- type,
- name,
- collection_->GetObjectId(object->address()),
- GetObjectSize(object),
- children_count,
- retainers_count);
+ entry->Init(this, type, name, id, size, children_count, retainers_count);
return entry;
}
@@ -1365,6 +1377,8 @@ HeapEntry* HeapSnapshot::GetNextEntryToInit() {
} else {
entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
}
+ ASSERT(reinterpret_cast<char*>(entries_.last()) <
+ (raw_entries_ + raw_entries_size_));
return entries_.last();
}
@@ -1534,10 +1548,11 @@ HeapSnapshotsCollection::~HeapSnapshotsCollection() {
}
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
+ const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
- HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
+ HeapSnapshot* snapshot = new HeapSnapshot(this, type, name, uid);
snapshots_.Add(snapshot);
HashMap::Entry* entry =
snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
@@ -1564,6 +1579,9 @@ HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
}
+HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
+ reinterpret_cast<HeapEntry*>(1);
+
HeapEntriesMap::HeapEntriesMap()
: entries_(HeapObjectsMatch),
entries_count_(0),
@@ -1612,7 +1630,7 @@ void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
int* prev_children_count,
int* prev_retainers_count) {
- HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), true);
+ HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
ASSERT(from_cache_entry != NULL);
ASSERT(to_cache_entry != NULL);
@@ -1631,42 +1649,19 @@ void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
}
-template<class Visitor>
-void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- if (!IsAlias(p->value)) {
- EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
- entry_info->entry = visitor->GetEntry(
- reinterpret_cast<HeapObject*>(p->key),
- entry_info->children_count,
- entry_info->retainers_count);
- entry_info->children_count = 0;
- entry_info->retainers_count = 0;
- }
- }
-}
-
-
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
: snapshot_(snapshot),
collection_(snapshot->collection()),
filler_(NULL) {
}
-
-HeapEntry *const
-HeapSnapshotGenerator::SnapshotFillerInterface::kHeapEntryPlaceholder =
- reinterpret_cast<HeapEntry*>(1);
-
class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface {
public:
explicit SnapshotCounter(HeapEntriesMap* entries)
: entries_(entries) { }
HeapEntry* AddEntry(HeapObject* obj) {
- entries_->Pair(obj, kHeapEntryPlaceholder);
- return kHeapEntryPlaceholder;
+ entries_->Pair(obj, HeapEntriesMap::kHeapEntryPlaceholder);
+ return HeapEntriesMap::kHeapEntryPlaceholder;
}
void SetElementReference(HeapObject* parent_obj,
HeapEntry*,
@@ -2057,10 +2052,12 @@ void HeapSnapshotGenerator::SetRootReference(Object* child_obj) {
void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
raw_additions_root_ =
NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
- additions_root()->Init(snapshot2_, additions_count, 0);
+ additions_root()->Init(
+ snapshot2_, HeapEntry::kInternal, "", 0, 0, additions_count, 0);
raw_deletions_root_ =
NewArray<char>(HeapEntry::EntriesSize(1, deletions_count, 0));
- deletions_root()->Init(snapshot1_, deletions_count, 0);
+ deletions_root()->Init(
+ snapshot1_, HeapEntry::kInternal, "", 0, 0, deletions_count, 0);
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index bebf40a376..c6d6f4cbc6 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -67,6 +67,8 @@ class StringsStorage {
~StringsStorage();
const char* GetName(String* name);
+ inline const char* GetFunctionName(String* name);
+ inline const char* GetFunctionName(const char* name);
private:
INLINE(static bool StringsMatch(void* key1, void* key2)) {
@@ -297,10 +299,17 @@ class CpuProfilesCollection {
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
+ // Limits the number of profiles that can be simultaneously collected.
+ static const int kMaxSimultaneousProfiles = 100;
+
private:
- INLINE(const char* GetFunctionName(String* name));
- INLINE(const char* GetFunctionName(const char* name));
const char* GetName(int args_count);
+ const char* GetFunctionName(String* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+ const char* GetFunctionName(const char* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
@@ -498,7 +507,6 @@ class HeapEntry BASE_EMBEDDED {
};
HeapEntry() { }
- void Init(HeapSnapshot* snapshot, int children_count, int retainers_count);
void Init(HeapSnapshot* snapshot,
Type type,
const char* name,
@@ -640,12 +648,19 @@ class HeapSnapshotsDiff;
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
+ enum Type {
+ kFull = v8::HeapSnapshot::kFull,
+ kAggregated = v8::HeapSnapshot::kAggregated
+ };
+
HeapSnapshot(HeapSnapshotsCollection* collection,
+ Type type,
const char* title,
unsigned uid);
~HeapSnapshot();
HeapSnapshotsCollection* collection() { return collection_; }
+ Type type() { return type_; }
const char* title() { return title_; }
unsigned uid() { return uid_; }
HeapEntry* root() { return entries_[root_entry_index_]; }
@@ -655,6 +670,12 @@ class HeapSnapshot {
HeapEntry* AddEntry(
HeapObject* object, int children_count, int retainers_count);
bool WillAddEntry(HeapObject* object);
+ HeapEntry* AddEntry(HeapEntry::Type type,
+ const char* name,
+ uint64_t id,
+ int size,
+ int children_count,
+ int retainers_count);
int AddCalculatedData();
HeapEntryCalculatedData& GetCalculatedData(int index) {
return calculated_data_[index];
@@ -681,6 +702,7 @@ class HeapSnapshot {
static int CalculateNetworkSize(JSObject* obj);
HeapSnapshotsCollection* collection_;
+ Type type_;
const char* title_;
unsigned uid_;
int root_entry_index_;
@@ -688,6 +710,9 @@ class HeapSnapshot {
List<HeapEntry*> entries_;
bool entries_sorted_;
List<HeapEntryCalculatedData> calculated_data_;
+#ifdef DEBUG
+ int raw_entries_size_;
+#endif
friend class HeapSnapshotTester;
@@ -792,12 +817,16 @@ class HeapSnapshotsCollection {
bool is_tracking_objects() { return is_tracking_objects_; }
- HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ HeapSnapshot* NewSnapshot(
+ HeapSnapshot::Type type, const char* name, unsigned uid);
void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
const char* GetName(String* name) { return names_.GetName(name); }
+ const char* GetFunctionName(String* name) {
+ return names_.GetFunctionName(name);
+ }
TokenEnumerator* token_enumerator() { return token_enumerator_; }
@@ -848,6 +877,8 @@ class HeapEntriesMap {
int total_children_count() { return total_children_count_; }
int total_retainers_count() { return total_retainers_count_; }
+ static HeapEntry *const kHeapEntryPlaceholder;
+
private:
struct EntryInfo {
explicit EntryInfo(HeapEntry* entry)
@@ -903,8 +934,6 @@ class HeapSnapshotGenerator {
HeapEntry* child_entry) = 0;
virtual void SetRootReference(Object* child_obj,
HeapEntry* child_entry) = 0;
-
- static HeapEntry *const kHeapEntryPlaceholder;
};
explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 9f8e2c5c81..652b690d7b 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -28,6 +28,8 @@
#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_MACRO_ASSEMBLER_H_
+#include "ast.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index fa702b2a82..566a96c348 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -137,17 +137,6 @@ function RegExpCache() {
var regExpCache = new RegExpCache();
-function CloneRegExpResult(array) {
- if (array == null) return null;
- var length = array.length;
- var answer = %_RegExpConstructResult(length, array.index, array.input);
- for (var i = 0; i < length; i++) {
- answer[i] = array[i];
- }
- return answer;
-}
-
-
function BuildResultFromMatchInfo(lastMatchInfo, s) {
var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
var result = %_RegExpConstructResult(numResults, lastMatchInfo[CAPTURE0], s);
@@ -197,7 +186,7 @@ function RegExpExec(string) {
%_IsRegExpEquivalent(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string)) {
if (cache.answerSaved) {
- return CloneRegExpResult(cache.answer);
+ return %_RegExpCloneResult(cache.answer);
} else {
saveAnswer = true;
}
@@ -251,7 +240,7 @@ function RegExpExec(string) {
cache.regExp = this;
cache.subject = s;
cache.lastIndex = lastIndex;
- if (saveAnswer) cache.answer = CloneRegExpResult(result);
+ if (saveAnswer) cache.answer = %_RegExpCloneResult(result);
cache.answerSaved = saveAnswer;
cache.type = 'exec';
}
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 73301b9186..4ddf1bf6f1 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -28,21 +28,15 @@
#include "v8.h"
#include "ast.h"
-#include "func-name-inferrer.h"
#include "scopes.h"
#include "rewriter.h"
namespace v8 {
namespace internal {
-
class AstOptimizer: public AstVisitor {
public:
explicit AstOptimizer() : has_function_literal_(false) {}
- explicit AstOptimizer(Handle<String> enclosing_name)
- : has_function_literal_(false) {
- func_name_inferrer_.PushEnclosingName(enclosing_name);
- }
void Optimize(ZoneList<Statement*>* statements);
@@ -50,8 +44,6 @@ class AstOptimizer: public AstVisitor {
// Used for loop condition analysis. Cleared before visiting a loop
// condition, set when a function literal is visited.
bool has_function_literal_;
- // Helper object for function name inferring.
- FuncNameInferrer func_name_inferrer_;
// Helpers
void OptimizeArguments(ZoneList<Expression*>* arguments);
@@ -113,7 +105,7 @@ void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
has_function_literal_ = false;
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
- node->may_have_function_literal_ = has_function_literal_;
+ node->set_may_have_function_literal(has_function_literal_);
Visit(node->body());
}
@@ -126,7 +118,7 @@ void AstOptimizer::VisitForStatement(ForStatement* node) {
has_function_literal_ = false;
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
- node->may_have_function_literal_ = has_function_literal_;
+ node->set_may_have_function_literal(has_function_literal_);
}
Visit(node->body());
if (node->next() != NULL) {
@@ -211,11 +203,6 @@ void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
has_function_literal_ = true;
-
- if (node->name()->length() == 0) {
- // Anonymous function.
- func_name_inferrer_.AddFunction(node);
- }
}
@@ -247,11 +234,6 @@ void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
var->type()->SetAsLikelySmi();
}
- if (!var->is_this() &&
- !Heap::result_symbol()->Equals(*var->name())) {
- func_name_inferrer_.PushName(var->name());
- }
-
if (FLAG_safe_int32_compiler) {
if (var->IsStackAllocated() &&
!var->is_arguments() &&
@@ -268,11 +250,6 @@ void AstOptimizer::VisitLiteral(Literal* node) {
if (literal->IsSmi()) {
node->type()->SetAsLikelySmi();
node->set_side_effect_free(true);
- } else if (literal->IsString()) {
- Handle<String> lit_str(Handle<String>::cast(literal));
- if (!Heap::prototype_symbol()->Equals(*lit_str)) {
- func_name_inferrer_.PushName(lit_str);
- }
} else if (literal->IsHeapNumber()) {
if (node->to_int32()) {
// Any HeapNumber has an int32 value if it is the input to a bit op.
@@ -299,8 +276,6 @@ void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
- ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
- scoped_fni.Enter();
Visit(node->properties()->at(i)->key());
Visit(node->properties()->at(i)->value());
}
@@ -314,17 +289,11 @@ void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
void AstOptimizer::VisitAssignment(Assignment* node) {
- ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
switch (node->op()) {
case Token::INIT_VAR:
case Token::INIT_CONST:
case Token::ASSIGN:
// No type can be inferred from the general assignment.
-
- // Don't infer if it is "a = function(){...}();"-like expression.
- if (node->value()->AsCall() == NULL) {
- scoped_fni.Enter();
- }
break;
case Token::ASSIGN_BIT_OR:
case Token::ASSIGN_BIT_XOR:
@@ -430,12 +399,6 @@ void AstOptimizer::VisitCallNew(CallNew* node) {
void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
- ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
- if (Factory::InitializeVarGlobal_symbol()->Equals(*node->name()) &&
- node->arguments()->length() >= 2 &&
- node->arguments()->at(1)->AsFunctionLiteral() != NULL) {
- scoped_fni.Enter();
- }
OptimizeArguments(node->arguments());
}
@@ -472,6 +435,11 @@ void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
}
+void AstOptimizer::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void AstOptimizer::VisitCountOperation(CountOperation* node) {
// Count operations assume that they work on Smis.
node->expression()->set_no_negative_zero(node->is_prefix() ?
@@ -704,6 +672,11 @@ void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
}
+void AstOptimizer::VisitCompareToNull(CompareToNull* node) {
+ Visit(node->expression());
+}
+
+
void AstOptimizer::VisitThisFunction(ThisFunction* node) {
USE(node);
}
@@ -978,6 +951,11 @@ void Processor::VisitUnaryOperation(UnaryOperation* node) {
}
+void Processor::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void Processor::VisitCountOperation(CountOperation* node) {
USE(node);
UNREACHABLE();
@@ -996,6 +974,12 @@ void Processor::VisitCompareOperation(CompareOperation* node) {
}
+void Processor::VisitCompareToNull(CompareToNull* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
void Processor::VisitThisFunction(ThisFunction* node) {
USE(node);
UNREACHABLE();
@@ -1025,7 +1009,7 @@ bool Rewriter::Optimize(FunctionLiteral* function) {
if (FLAG_optimize_ast && !body->is_empty()) {
HistogramTimerScope timer(&Counters::ast_optimization);
- AstOptimizer optimizer(function->name());
+ AstOptimizer optimizer;
optimizer.Optimize(body);
if (optimizer.HasStackOverflow()) {
return false;
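
The optimizer's surviving job after this change is the loop-condition analysis: clear a visitor-carried flag before visiting a loop condition, let any function literal in the subtree set it, and record the result on the loop node through the new setter. A minimal sketch of that flag pattern, using hypothetical Node/Loop/Visitor types rather than V8's AST classes:

    #include <iostream>
    #include <memory>

    // Hypothetical miniature AST: only what the flag pattern needs.
    struct Node {
      virtual ~Node() = default;
      virtual void Accept(struct Visitor* v) = 0;
    };

    struct FunctionLiteral : Node { void Accept(Visitor* v) override; };
    struct Loop : Node {
      std::unique_ptr<Node> cond;
      bool may_have_function_literal = false;
      void Accept(Visitor* v) override;
    };

    struct Visitor {
      bool has_function_literal = false;
      void Visit(Node* n) { n->Accept(this); }
      virtual void VisitFunctionLiteral(FunctionLiteral*) {
        has_function_literal = true;  // Any literal in the subtree sets the flag.
      }
      virtual void VisitLoop(Loop* loop) {
        has_function_literal = false;       // Clear before the condition...
        Visit(loop->cond.get());            // ...let the subtree set it...
        loop->may_have_function_literal = has_function_literal;  // ...record it.
      }
    };

    void FunctionLiteral::Accept(Visitor* v) { v->VisitFunctionLiteral(this); }
    void Loop::Accept(Visitor* v) { v->VisitLoop(this); }

    int main() {
      Loop loop;
      loop.cond = std::make_unique<FunctionLiteral>();
      Visitor v;
      v.Visit(&loop);
      std::cout << loop.may_have_function_literal << "\n";  // prints 1
    }
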
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index fc6ca762f1..43a6734235 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -98,7 +98,7 @@ namespace internal {
static StaticResource<StringInputBuffer> runtime_string_input_buffer;
-static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
+MUST_USE_RESULT static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
StackLimitCheck check;
if (check.HasOverflowed()) return Top::StackOverflow();
@@ -160,13 +160,22 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
- for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- result = DeepCopyBoilerplate(js_object);
- if (result->IsFailure()) return result;
- elements->set(i, result);
+ if (elements->map() == Heap::fixed_cow_array_map()) {
+ Counters::cow_arrays_created_runtime.Increment();
+#ifdef DEBUG
+ for (int i = 0; i < elements->length(); i++) {
+ ASSERT(!elements->get(i)->IsJSObject());
+ }
+#endif
+ } else {
+ for (int i = 0; i < elements->length(); i++) {
+ Object* value = elements->get(i);
+ if (value->IsJSObject()) {
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
+ if (result->IsFailure()) return result;
+ elements->set(i, result);
+ }
}
}
break;
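
Both COW hunks in DeepCopyBoilerplate lean on map identity as a cheap type tag: an elements array whose map pointer is the shared fixed_cow_array_map is guaranteed to hold no JSObjects, so the deep copy can skip the whole recursion (the debug loop only re-checks that invariant). A hedged sketch of the same trick, with a hypothetical sentinel tag standing in for the map:

    #include <cassert>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for V8's map-identity test: a shared sentinel
    // marks arrays as copy-on-write, and COW arrays are guaranteed to be
    // "shallow" (no nested arrays), so a deep copy can skip them entirely.
    struct Tag {};
    static Tag kCowTag;  // plays the role of Heap::fixed_cow_array_map()

    struct Array {
      const Tag* tag = nullptr;
      std::vector<Array*> nested;  // only non-COW arrays may have entries
    };

    int DeepCopies(const Array& a) {
      if (a.tag == &kCowTag) {
        // COW invariant: nothing nested, nothing to copy.
        assert(a.nested.empty());
        return 0;
      }
      int copies = 1;
      for (Array* n : a.nested) copies += DeepCopies(*n);
      return copies;
    }

    int main() {
      Array cow;    cow.tag = &kCowTag;
      Array plain;  Array child; plain.nested.push_back(&child);
      std::cout << DeepCopies(cow) << " " << DeepCopies(plain) << "\n";  // 0 2
    }
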
@@ -343,18 +352,29 @@ static Handle<Object> CreateArrayLiteralBoilerplate(
JSFunction::GlobalContextFromLiterals(*literals)->array_function());
Handle<Object> object = Factory::NewJSObject(constructor);
- Handle<Object> copied_elements = Factory::CopyFixedArray(elements);
+ const bool is_cow = (elements->map() == Heap::fixed_cow_array_map());
+ Handle<FixedArray> copied_elements =
+ is_cow ? elements : Factory::CopyFixedArray(elements);
Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
- for (int i = 0; i < content->length(); i++) {
- if (content->get(i)->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object literal.
- Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
- Handle<Object> result =
- CreateLiteralBoilerplate(literals, fa);
- if (result.is_null()) return result;
- content->set(i, *result);
+ if (is_cow) {
+#ifdef DEBUG
+ // Copy-on-write arrays must be shallow (and simple).
+ for (int i = 0; i < content->length(); i++) {
+ ASSERT(!content->get(i)->IsFixedArray());
+ }
+#endif
+ } else {
+ for (int i = 0; i < content->length(); i++) {
+ if (content->get(i)->IsFixedArray()) {
+ // The value contains the constant_properties of a
+ // simple object literal.
+ Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
+ Handle<Object> result =
+ CreateLiteralBoilerplate(literals, fa);
+ if (result.is_null()) return result;
+ content->set(i, *result);
+ }
}
}
@@ -483,6 +503,10 @@ static Object* Runtime_CreateArrayLiteralShallow(Arguments args) {
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
+ if (JSObject::cast(*boilerplate)->elements()->map() ==
+ Heap::fixed_cow_array_map()) {
+ Counters::cow_arrays_created_runtime.Increment();
+ }
return Heap::CopyJSObject(JSObject::cast(*boilerplate));
}
@@ -956,7 +980,9 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
context->set(index, *initial_value);
}
} else {
- Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
+ // The holder is an arguments object.
+ Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+ SetElement(arguments, index, initial_value);
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
@@ -1214,7 +1240,8 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
} else {
// The holder is an arguments object.
ASSERT((attributes & READ_ONLY) == 0);
- Handle<JSObject>::cast(holder)->SetElement(index, *value);
+ Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+ SetElement(arguments, index, value);
}
return *value;
}
@@ -1340,6 +1367,63 @@ static Object* Runtime_RegExpConstructResult(Arguments args) {
}
+static Object* Runtime_RegExpCloneResult(Arguments args) {
+ ASSERT(args.length() == 1);
+ Map* regexp_result_map;
+ {
+ AssertNoAllocation no_gc;
+ HandleScope handles;
+ regexp_result_map = Top::global_context()->regexp_result_map();
+ }
+ if (!args[0]->IsJSArray()) return args[0];
+
+ JSArray* result = JSArray::cast(args[0]);
+ // Arguments to RegExpCloneResult should always be fresh RegExp exec call
+ // results (either a fresh JSRegExpResult or null).
+ // If the argument is not a JSRegExpResult, or has been modified, just return
+ // the argument uncloned.
+ if (result->map() != regexp_result_map) return result;
+
+ // Having the original JSRegExpResult map guarantees that we have
+ // fast elements and no properties except the two in-object properties.
+ ASSERT(result->HasFastElements());
+ ASSERT(result->properties() == Heap::empty_fixed_array());
+ ASSERT_EQ(2, regexp_result_map->inobject_properties());
+
+ Object* new_array_alloc = Heap::AllocateRaw(JSRegExpResult::kSize,
+ NEW_SPACE,
+ OLD_POINTER_SPACE);
+ if (new_array_alloc->IsFailure()) return new_array_alloc;
+
+ // Set HeapObject map to JSRegExpResult map.
+ reinterpret_cast<HeapObject*>(new_array_alloc)->set_map(regexp_result_map);
+
+ JSArray* new_array = JSArray::cast(new_array_alloc);
+
+ // Copy JSObject properties.
+ new_array->set_properties(result->properties()); // Empty FixedArray.
+
+ // Copy JSObject elements as copy-on-write.
+ FixedArray* elements = FixedArray::cast(result->elements());
+ if (elements != Heap::empty_fixed_array()) {
+ elements->set_map(Heap::fixed_cow_array_map());
+ }
+ new_array->set_elements(elements);
+
+ // Copy JSArray length.
+ new_array->set_length(result->length());
+
+ // Copy JSRegExpResult in-object property fields input and index.
+ new_array->FastPropertyAtPut(JSRegExpResult::kIndexIndex,
+ result->FastPropertyAt(
+ JSRegExpResult::kIndexIndex));
+ new_array->FastPropertyAtPut(JSRegExpResult::kInputIndex,
+ result->FastPropertyAt(
+ JSRegExpResult::kInputIndex));
+ return new_array;
+}
+
+
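
Runtime_RegExpCloneResult makes the clone O(1): instead of copying the match array's elements, it retags the existing FixedArray with the copy-on-write map and lets both arrays share the store until one of them writes. The same mechanism in portable form, a sketch using shared_ptr reference counts in place of the map tag:

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <vector>

    // Toy copy-on-write array: "cloning" only bumps a reference count, and
    // the backing store is copied lazily on the first write to either clone.
    // This is the general mechanism; V8 implements it by retagging the
    // elements FixedArray instead of using shared_ptr.
    class CowArray {
     public:
      explicit CowArray(std::vector<int> v)
          : data_(std::make_shared<std::vector<int>>(std::move(v))) {}

      CowArray Clone() const { return *this; }  // O(1): shares the store

      int Get(std::size_t i) const { return (*data_)[i]; }

      void Set(std::size_t i, int value) {
        if (data_.use_count() > 1) {
          // First write after a clone: detach by copying the store.
          data_ = std::make_shared<std::vector<int>>(*data_);
        }
        (*data_)[i] = value;
      }

     private:
      std::shared_ptr<std::vector<int>> data_;
    };

    int main() {
      CowArray a({1, 2, 3});
      CowArray b = a.Clone();
      b.Set(0, 42);                                    // detaches b's storage
      std::cout << a.Get(0) << " " << b.Get(0) << "\n";  // prints "1 42"
    }
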
static Object* Runtime_RegExpInitializeObject(Arguments args) {
AssertNoAllocation no_alloc;
ASSERT(args.length() == 5);
@@ -1620,7 +1704,6 @@ static Object* Runtime_SetCode(Arguments args) {
RUNTIME_ASSERT(code->IsJSFunction());
Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
Handle<SharedFunctionInfo> shared(fun->shared());
- SetExpectedNofProperties(target, shared->expected_nof_properties());
if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
@@ -1665,6 +1748,17 @@ static Object* Runtime_SetCode(Arguments args) {
}
+static Object* Runtime_SetExpectedNumberOfProperties(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_SMI_CHECKED(num, args[1]);
+ RUNTIME_ASSERT(num >= 0);
+ SetExpectedNofProperties(function, num);
+ return Heap::undefined_value();
+}
+
+
static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (char_code->ToArrayIndex(&code)) {
@@ -2724,40 +2818,6 @@ static int BoyerMooreIndexOf(Vector<const schar> subject,
}
-template <typename schar>
-static inline int SingleCharIndexOf(Vector<const schar> string,
- schar pattern_char,
- int start_index) {
- if (sizeof(schar) == 1) {
- const schar* pos = reinterpret_cast<const schar*>(
- memchr(string.start() + start_index,
- pattern_char,
- string.length() - start_index));
- if (pos == NULL) return -1;
- return static_cast<int>(pos - string.start());
- }
- for (int i = start_index, n = string.length(); i < n; i++) {
- if (pattern_char == string[i]) {
- return i;
- }
- }
- return -1;
-}
-
-
-template <typename schar>
-static int SingleCharLastIndexOf(Vector<const schar> string,
- schar pattern_char,
- int start_index) {
- for (int i = start_index; i >= 0; i--) {
- if (pattern_char == string[i]) {
- return i;
- }
- }
- return -1;
-}
-
-
// Trivial string search for shorter strings.
// On return, if "complete" is set to true, the return value is the
// final result of searching for the pattern in the subject.
@@ -2769,6 +2829,7 @@ static int SimpleIndexOf(Vector<const schar> subject,
Vector<const pchar> pattern,
int idx,
bool* complete) {
+ ASSERT(pattern.length() > 1);
// Badness is a count of how much work we have done. When we have
// done enough work we decide it's probably worth switching to a better
// algorithm.
@@ -2831,12 +2892,12 @@ static int SimpleIndexOf(Vector<const schar> subject,
if (subject[i] != pattern_first_char) continue;
}
int j = 1;
- do {
+ while (j < pattern.length()) {
if (pattern[j] != subject[i+j]) {
break;
}
j++;
- } while (j < pattern.length());
+ }
if (j == pattern.length()) {
return i;
}
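
The do-while to while change above is not just style: with j starting at 1, a do-while body runs once before the guard, so a one-character pattern would compare pattern[1] past the end. The new ASSERT(pattern.length() > 1) documents that single-character needles never reach SimpleIndexOf, and the while form makes the inner loop safe regardless. A small self-contained illustration:

    #include <iostream>
    #include <string>

    // Compare the two loop shapes from the hunk above for a 1-char pattern.
    // With j starting at 1, a do-while runs its body once before testing the
    // guard, so it would index pattern[1] on a length-1 pattern; the while
    // form tests first and never touches the body.
    bool MatchesAt(const std::string& subject, const std::string& pattern,
                   size_t i) {
      size_t j = 1;                     // pattern[0] already compared by caller
      while (j < pattern.length()) {    // safe: guard runs before the body
        if (pattern[j] != subject[i + j]) return false;
        j++;
      }
      return true;
    }

    int main() {
      std::cout << MatchesAt("abc", "a", 0) << "\n";  // 1; a do-while would
                                                      // read pattern[1] here
    }
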
@@ -2852,7 +2913,6 @@ enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
template <typename pchar>
static inline StringSearchStrategy InitializeStringSearch(
Vector<const pchar> pat, bool ascii_subject) {
- ASSERT(pat.length() > 1);
// We have an ASCII haystack and a non-ASCII needle. Check if there
// really is a non-ASCII character in the needle and bail out if there
// is.
@@ -2938,54 +2998,15 @@ int Runtime::StringMatch(Handle<String> sub,
int subject_length = sub->length();
if (start_index + pattern_length > subject_length) return -1;
- if (!sub->IsFlat()) {
- FlattenString(sub);
- }
-
- // Searching for one specific character is common. For one
- // character patterns linear search is necessary, so any smart
- // algorithm is unnecessary overhead.
- if (pattern_length == 1) {
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- String* seq_sub = *sub;
- if (seq_sub->IsConsString()) {
- seq_sub = ConsString::cast(seq_sub)->first();
- }
- if (seq_sub->IsAsciiRepresentation()) {
- uc16 pchar = pat->Get(0);
- if (pchar > String::kMaxAsciiCharCode) {
- return -1;
- }
- Vector<const char> ascii_vector =
- seq_sub->ToAsciiVector().SubVector(start_index, subject_length);
- const void* pos = memchr(ascii_vector.start(),
- static_cast<const char>(pchar),
- static_cast<size_t>(ascii_vector.length()));
- if (pos == NULL) {
- return -1;
- }
- return static_cast<int>(reinterpret_cast<const char*>(pos)
- - ascii_vector.start() + start_index);
- }
- return SingleCharIndexOf(seq_sub->ToUC16Vector(),
- pat->Get(0),
- start_index);
- }
-
- if (!pat->IsFlat()) {
- FlattenString(pat);
- }
+ if (!sub->IsFlat()) FlattenString(sub);
+ if (!pat->IsFlat()) FlattenString(pat);
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
// Extract flattened substrings of cons strings before determining asciiness.
String* seq_sub = *sub;
- if (seq_sub->IsConsString()) {
- seq_sub = ConsString::cast(seq_sub)->first();
- }
+ if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
String* seq_pat = *pat;
- if (seq_pat->IsConsString()) {
- seq_pat = ConsString::cast(seq_pat)->first();
- }
+ if (seq_pat->IsConsString()) seq_pat = ConsString::cast(seq_pat)->first();
// dispatch on type of strings
if (seq_pat->IsAsciiRepresentation()) {
@@ -3075,30 +3096,8 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {
return Smi::FromInt(start_index);
}
- if (!sub->IsFlat()) {
- FlattenString(sub);
- }
-
- if (pat_length == 1) {
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- if (sub->IsAsciiRepresentation()) {
- uc16 pchar = pat->Get(0);
- if (pchar > String::kMaxAsciiCharCode) {
- return Smi::FromInt(-1);
- }
- return Smi::FromInt(SingleCharLastIndexOf(sub->ToAsciiVector(),
- static_cast<char>(pat->Get(0)),
- start_index));
- } else {
- return Smi::FromInt(SingleCharLastIndexOf(sub->ToUC16Vector(),
- pat->Get(0),
- start_index));
- }
- }
-
- if (!pat->IsFlat()) {
- FlattenString(pat);
- }
+ if (!sub->IsFlat()) FlattenString(sub);
+ if (!pat->IsFlat()) FlattenString(pat);
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
@@ -3276,88 +3275,6 @@ static void SetLastMatchInfoNoCaptures(Handle<String> subject,
}
-template <typename schar>
-static bool SearchCharMultiple(Vector<schar> subject,
- String* pattern,
- schar pattern_char,
- FixedArrayBuilder* builder,
- int* match_pos) {
- // Position of last match.
- int pos = *match_pos;
- int subject_length = subject.length();
- while (pos < subject_length) {
- int match_end = pos + 1;
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- int new_pos = SingleCharIndexOf(subject, pattern_char, match_end);
- if (new_pos >= 0) {
- // Match has been found.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder, match_end, new_pos);
- }
- pos = new_pos;
- builder->Add(pattern);
- } else {
- break;
- }
- }
- if (pos + 1 < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder, pos + 1, subject_length);
- }
- *match_pos = pos;
- return true;
-}
-
-
-static bool SearchCharMultiple(Handle<String> subject,
- Handle<String> pattern,
- Handle<JSArray> last_match_info,
- FixedArrayBuilder* builder) {
- ASSERT(subject->IsFlat());
- ASSERT_EQ(1, pattern->length());
- uc16 pattern_char = pattern->Get(0);
- // Treating position before first as initial "previous match position".
- int match_pos = -1;
-
- for (;;) { // Break when search complete.
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- AssertNoAllocation no_gc;
- if (subject->IsAsciiRepresentation()) {
- if (pattern_char > String::kMaxAsciiCharCode) {
- break;
- }
- Vector<const char> subject_vector = subject->ToAsciiVector();
- char pattern_ascii_char = static_cast<char>(pattern_char);
- bool complete = SearchCharMultiple<const char>(subject_vector,
- *pattern,
- pattern_ascii_char,
- builder,
- &match_pos);
- if (complete) break;
- } else {
- Vector<const uc16> subject_vector = subject->ToUC16Vector();
- bool complete = SearchCharMultiple<const uc16>(subject_vector,
- *pattern,
- pattern_char,
- builder,
- &match_pos);
- if (complete) break;
- }
- }
-
- if (match_pos >= 0) {
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- match_pos,
- match_pos + 1);
- return true;
- }
- return false; // No matches at all.
-}
-
-
template <typename schar, typename pchar>
static bool SearchStringMultiple(Vector<schar> subject,
String* pattern,
@@ -3435,7 +3352,6 @@ static bool SearchStringMultiple(Handle<String> subject,
FixedArrayBuilder* builder) {
ASSERT(subject->IsFlat());
ASSERT(pattern->IsFlat());
- ASSERT(pattern->length() > 1);
// Treating as if a previous match was before first character.
int match_pos = -pattern->length();
@@ -3500,7 +3416,7 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
OffsetsVector registers(required_registers);
- Vector<int> register_vector(registers.vector(), registers.length());
+ Vector<int32_t> register_vector(registers.vector(), registers.length());
int subject_length = subject->length();
for (;;) { // Break on failure, return on exception.
@@ -3562,7 +3478,7 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
OffsetsVector registers(required_registers);
- Vector<int> register_vector(registers.vector(), registers.length());
+ Vector<int32_t> register_vector(registers.vector(), registers.length());
RegExpImpl::IrregexpResult result =
RegExpImpl::IrregexpExecOnce(regexp,
@@ -3622,7 +3538,7 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
}
// Swap register vectors, so the last successful match is in
// prev_register_vector.
- Vector<int> tmp = prev_register_vector;
+ Vector<int32_t> tmp = prev_register_vector;
prev_register_vector = register_vector;
register_vector = tmp;
@@ -3693,14 +3609,6 @@ static Object* Runtime_RegExpExecMultiple(Arguments args) {
if (regexp->TypeTag() == JSRegExp::ATOM) {
Handle<String> pattern(
String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
- int pattern_length = pattern->length();
- if (pattern_length == 1) {
- if (SearchCharMultiple(subject, pattern, last_match_info, &builder)) {
- return *builder.ToJSArray(result_array);
- }
- return Heap::null_value();
- }
-
if (!pattern->IsFlat()) FlattenString(pattern);
if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
return *builder.ToJSArray(result_array);
@@ -4014,7 +3922,8 @@ static Object* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
if (result.IsProperty() &&
(result.type() == FIELD || result.type() == NORMAL
|| result.type() == CONSTANT_FUNCTION)) {
- obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+ Object* ok = obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+ if (ok->IsFailure()) return ok;
}
return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
}
@@ -4763,6 +4672,18 @@ static Object* Runtime_StringToNumber(Arguments args) {
if (minus) {
if (d == 0) return Heap::minus_zero_value();
d = -d;
+ } else if (!subject->HasHashCode() &&
+ len <= String::kMaxArrayIndexSize &&
+ (len == 1 || data[0] != '0')) {
+ // String hash is not calculated yet but all the data are present.
+ // Update the hash field to speed up sequential conversions.
+ uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
+#ifdef DEBUG
+ subject->Hash(); // Force hash calculation.
+ ASSERT_EQ(static_cast<int>(subject->hash_field()),
+ static_cast<int>(hash));
+#endif
+ subject->set_hash_field(hash);
}
return Smi::FromInt(d);
}
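
The StringToNumber addition is an opportunistic cache fill: once the digits have been parsed and the value is known to be a valid array index, the string's hash field can be published immediately so later hash requests or repeat conversions cost nothing. A sketch of the lazily-cached-hash idea (toy hash and types, not V8's hash-field encoding or MakeArrayIndexHash):

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Toy lazily-hashed string: the hash is computed on demand, but a
    // routine that has already walked the characters can publish the hash
    // early so later lookups skip the work.
    struct LazyString {
      std::string text;
      uint32_t hash = 0;
      bool has_hash = false;

      uint32_t Hash() {
        if (!has_hash) {
          uint32_t h = 0;
          for (char c : text) h = h * 31 + static_cast<uint8_t>(c);
          hash = h;
          has_hash = true;
        }
        return hash;
      }
    };

    // Hypothetical stand-in for MakeArrayIndexHash: derive the same hash
    // the lazy path would produce, using data already in hand.
    uint32_t HashFromChars(const std::string& s) {
      uint32_t h = 0;
      for (char c : s) h = h * 31 + static_cast<uint8_t>(c);
      return h;
    }

    int main() {
      LazyString s{"123"};
      if (!s.has_hash) {        // mirrors the !subject->HasHashCode() check
        s.hash = HashFromChars(s.text);
        s.has_hash = true;      // subsequent s.Hash() calls are free
      }
      std::cout << (s.Hash() == HashFromChars(s.text)) << "\n";  // 1
    }
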
@@ -5288,23 +5209,6 @@ void FindStringIndices(Vector<const schar> subject,
}
}
-template <typename schar>
-inline void FindCharIndices(Vector<const schar> subject,
- const schar pattern_char,
- ZoneList<int>* indices,
- unsigned int limit) {
- // Collect indices of pattern_char in subject, and the end-of-string index.
- // Stop after finding at most limit values.
- int index = 0;
- while (limit > 0) {
- index = SingleCharIndexOf(subject, pattern_char, index);
- if (index < 0) return;
- indices->Add(index);
- index++;
- limit--;
- }
-}
-
static Object* Runtime_StringSplit(Arguments args) {
ASSERT(args.length() == 3);
@@ -5330,22 +5234,10 @@ static Object* Runtime_StringSplit(Arguments args) {
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
ZoneList<int> indices(initial_capacity);
- if (pattern_length == 1) {
- // Special case, go directly to fast single-character split.
- AssertNoAllocation nogc;
- uc16 pattern_char = pattern->Get(0);
- if (subject->IsTwoByteRepresentation()) {
- FindCharIndices(subject->ToUC16Vector(), pattern_char,
- &indices,
- limit);
- } else if (pattern_char <= String::kMaxAsciiCharCode) {
- FindCharIndices(subject->ToAsciiVector(),
- static_cast<char>(pattern_char),
- &indices,
- limit);
- }
- } else {
- if (!pattern->IsFlat()) FlattenString(pattern);
+ if (!pattern->IsFlat()) FlattenString(pattern);
+
+ // No allocation block.
+ {
AssertNoAllocation nogc;
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
@@ -5375,11 +5267,12 @@ static Object* Runtime_StringSplit(Arguments args) {
}
}
}
+
if (static_cast<uint32_t>(indices.length()) < limit) {
indices.Add(subject_length);
}
- // The list indices now contains the end of each part to create.
+ // The list indices now contains the end of each part to create.
// Create JSArray of substrings separated by separator.
int part_count = indices.length();
@@ -5484,6 +5377,14 @@ static Object* Runtime_StringToArray(Arguments args) {
}
+static Object* Runtime_NewStringWrapper(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(String, value, args[0]);
+ return value->ToObject();
+}
+
+
bool Runtime::IsUpperCaseChar(uint16_t ch) {
unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
int char_length = to_upper_mapping.get(ch, 0, chars);
@@ -6699,9 +6600,13 @@ static Object* Runtime_DateYMDFromTime(Arguments args) {
int year, month, day;
DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
- res_array->SetElement(0, Smi::FromInt(year));
- res_array->SetElement(1, Smi::FromInt(month));
- res_array->SetElement(2, Smi::FromInt(day));
+ RUNTIME_ASSERT(res_array->elements()->map() == Heap::fixed_array_map());
+ FixedArray* elms = FixedArray::cast(res_array->elements());
+ RUNTIME_ASSERT(elms->length() == 3);
+
+ elms->set(0, Smi::FromInt(year));
+ elms->set(1, Smi::FromInt(month));
+ elms->set(2, Smi::FromInt(day));
return Heap::undefined_value();
}
@@ -8057,7 +7962,8 @@ static Object* Runtime_MoveArrayContents(Arguments args) {
CONVERT_CHECKED(JSArray, to, args[1]);
HeapObject* new_elements = from->elements();
Object* new_map;
- if (new_elements->map() == Heap::fixed_array_map()) {
+ if (new_elements->map() == Heap::fixed_array_map() ||
+ new_elements->map() == Heap::fixed_cow_array_map()) {
new_map = to->map()->GetFastElementsMap();
} else {
new_map = to->map()->GetSlowElementsMap();
@@ -10602,9 +10508,10 @@ Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
}
-Runtime::Function* Runtime::FunctionForName(const char* name) {
+Runtime::Function* Runtime::FunctionForName(Vector<const char> name) {
for (Function* f = Runtime_functions; f->name != NULL; f++) {
- if (strcmp(f->name, name) == 0) {
+ if (strncmp(f->name, name.start(), name.length()) == 0
+ && f->name[name.length()] == 0) {
return f;
}
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 26a2b9df24..312907adc5 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -162,6 +162,7 @@ namespace internal {
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeObject, 5, 1) \
F(RegExpConstructResult, 3, 1) \
+ F(RegExpCloneResult, 1, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
@@ -173,6 +174,7 @@ namespace internal {
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
F(StringToArray, 1, 1) \
+ F(NewStringWrapper, 1, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
@@ -200,6 +202,7 @@ namespace internal {
\
F(ClassOf, 1, 1) \
F(SetCode, 2, 1) \
+ F(SetExpectedNumberOfProperties, 2, 1) \
\
F(CreateApiFunction, 1, 1) \
F(IsTemplate, 1, 1) \
@@ -418,7 +421,7 @@ class Runtime : public AllStatic {
static Function* FunctionForId(FunctionId fid);
// Get the runtime function with the given name.
- static Function* FunctionForName(const char* name);
+ static Function* FunctionForName(Vector<const char> name);
static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 42968104b6..f2c8d6b846 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -502,7 +502,10 @@ function ToBoolean(x) {
// ECMA-262, section 9.3, page 31.
function ToNumber(x) {
if (IS_NUMBER(x)) return x;
- if (IS_STRING(x)) return %StringToNumber(x);
+ if (IS_STRING(x)) {
+ return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
+ : %StringToNumber(x);
+ }
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return $NaN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index ca0e2d86e1..15b1d44203 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -50,35 +50,22 @@ StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_;
// ----------------------------------------------------------------------------
// UTF8Buffer
-UTF8Buffer::UTF8Buffer() : data_(NULL), limit_(NULL) { }
+UTF8Buffer::UTF8Buffer() : buffer_(kInitialCapacity) { }
-UTF8Buffer::~UTF8Buffer() {
- if (data_ != NULL) DeleteArray(data_);
-}
+UTF8Buffer::~UTF8Buffer() {}
void UTF8Buffer::AddCharSlow(uc32 c) {
- static const int kCapacityGrowthLimit = 1 * MB;
- if (cursor_ > limit_) {
- int old_capacity = Capacity();
- int old_position = pos();
- int new_capacity =
- Min(old_capacity * 3, old_capacity + kCapacityGrowthLimit);
- char* new_data = NewArray<char>(new_capacity);
- memcpy(new_data, data_, old_position);
- DeleteArray(data_);
- data_ = new_data;
- cursor_ = new_data + old_position;
- limit_ = ComputeLimit(new_data, new_capacity);
- ASSERT(Capacity() == new_capacity && pos() == old_position);
- }
- if (static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
- *cursor_++ = c; // Common case: 7-bit ASCII.
- } else {
- cursor_ += unibrow::Utf8::Encode(cursor_, c);
- }
- ASSERT(pos() <= Capacity());
+ ASSERT(static_cast<unsigned>(c) > unibrow::Utf8::kMaxOneByteChar);
+ int length = unibrow::Utf8::Length(c);
+ Vector<char> block = buffer_.AddBlock(length, '\0');
+#ifdef DEBUG
+ int written_length = unibrow::Utf8::Encode(block.start(), c);
+ CHECK_EQ(length, written_length);
+#else
+ unibrow::Utf8::Encode(block.start(), c);
+#endif
}
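
AddCharSlow now computes the encoded length first, reserves exactly that many bytes from the collector, and encodes into the reserved block, with a debug check that the two agree. A standalone sketch of that length-then-encode shape (hand-rolled encoder; unibrow's API is assumed away):

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Standalone UTF-8 encoder mirroring the "compute length, reserve a
    // block, encode in place" shape of AddCharSlow.
    int Utf8Length(uint32_t c) {
      if (c < 0x80) return 1;
      if (c < 0x800) return 2;
      if (c < 0x10000) return 3;
      return 4;
    }

    int Utf8Encode(char* out, uint32_t c) {
      if (c < 0x80) { out[0] = static_cast<char>(c); return 1; }
      if (c < 0x800) {
        out[0] = static_cast<char>(0xC0 | (c >> 6));
        out[1] = static_cast<char>(0x80 | (c & 0x3F));
        return 2;
      }
      if (c < 0x10000) {
        out[0] = static_cast<char>(0xE0 | (c >> 12));
        out[1] = static_cast<char>(0x80 | ((c >> 6) & 0x3F));
        out[2] = static_cast<char>(0x80 | (c & 0x3F));
        return 3;
      }
      out[0] = static_cast<char>(0xF0 | (c >> 18));
      out[1] = static_cast<char>(0x80 | ((c >> 12) & 0x3F));
      out[2] = static_cast<char>(0x80 | ((c >> 6) & 0x3F));
      out[3] = static_cast<char>(0x80 | (c & 0x3F));
      return 4;
    }

    int main() {
      std::string buffer;
      uint32_t c = 0x20AC;                 // U+20AC EURO SIGN
      int length = Utf8Length(c);          // reserve exactly this block...
      buffer.resize(buffer.size() + length);
      int written = Utf8Encode(&buffer[buffer.size() - length], c);
      std::cout << (written == length) << " " << buffer.size() << "\n";  // 1 3
    }
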
@@ -332,6 +319,26 @@ void KeywordMatcher::Step(uc32 input) {
}
+
+// ----------------------------------------------------------------------------
+// Scanner::LiteralScope
+
+Scanner::LiteralScope::LiteralScope(Scanner* self)
+ : scanner_(self), complete_(false) {
+ self->StartLiteral();
+}
+
+
+Scanner::LiteralScope::~LiteralScope() {
+ if (!complete_) scanner_->DropLiteral();
+}
+
+
+void Scanner::LiteralScope::Complete() {
+ scanner_->TerminateLiteral();
+ complete_ = true;
+}
+
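
LiteralScope ties literal bookkeeping to C++ scope: construction starts the literal, Complete() commits it, and every early return Token::ILLEGAL in the scanning functions below now rolls back automatically in the destructor instead of leaking a half-built literal. The guard in miniature, with a toy buffer in place of the scanner:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Minimal RAII guard in the shape of Scanner::LiteralScope: starting is
    // implicit in construction, and any exit path that never calls
    // Complete() rolls the literal back in the destructor.
    class Buffer {
     public:
      void Start() { start_ = data_.size(); }
      void Add(char c) { data_.push_back(c); }
      void Drop() { data_.resize(start_); }  // discard the unfinished literal
      std::size_t size() const { return data_.size(); }
     private:
      std::vector<char> data_;
      std::size_t start_ = 0;
    };

    class LiteralScope {
     public:
      explicit LiteralScope(Buffer* b) : buffer_(b) { buffer_->Start(); }
      ~LiteralScope() { if (!complete_) buffer_->Drop(); }
      void Complete() { complete_ = true; }
     private:
      Buffer* buffer_;
      bool complete_ = false;
    };

    bool ScanDigit(Buffer* b, char c) {
      LiteralScope literal(b);
      if (c < '0' || c > '9') return false;  // early exit: destructor drops
      b->Add(c);
      literal.Complete();                    // keep the literal
      return true;
    }

    int main() {
      Buffer b;
      ScanDigit(&b, 'x');
      ScanDigit(&b, '7');
      std::cout << b.size() << "\n";  // 1: only the completed literal survives
    }
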
// ----------------------------------------------------------------------------
// Scanner
@@ -399,8 +406,10 @@ void Scanner::Init(Handle<String> source,
// Set c0_ (one character ahead)
ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
- // Initializer current_ to not refer to a literal buffer.
- current_.literal_buffer = NULL;
+ // Initialize current_ to not refer to a literal.
+ current_.literal_chars = Vector<const char>();
+ // Reset literal buffer.
+ literal_buffer_.Reset();
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
@@ -421,6 +430,7 @@ Token::Value Scanner::Next() {
stack_overflow_ = true;
next_.token = Token::ILLEGAL;
} else {
+ has_line_terminator_before_next_ = false;
Scan();
}
return current_.token;
@@ -428,24 +438,22 @@ Token::Value Scanner::Next() {
void Scanner::StartLiteral() {
- // Use the first buffer unless it's currently in use by the current_ token.
- // In most cases we won't have two literals/identifiers in a row, so
- // the second buffer won't be used very often and is unlikely to grow much.
- UTF8Buffer* free_buffer =
- (current_.literal_buffer != &literal_buffer_1_) ? &literal_buffer_1_
- : &literal_buffer_2_;
- next_.literal_buffer = free_buffer;
- free_buffer->Reset();
+ literal_buffer_.StartLiteral();
}
void Scanner::AddChar(uc32 c) {
- next_.literal_buffer->AddChar(c);
+ literal_buffer_.AddChar(c);
}
void Scanner::TerminateLiteral() {
- AddChar(0);
+ next_.literal_chars = literal_buffer_.EndLiteral();
+}
+
+
+void Scanner::DropLiteral() {
+ literal_buffer_.DropLiteral();
}
@@ -575,7 +583,7 @@ Token::Value Scanner::ScanHtmlComment() {
void Scanner::ScanJson() {
- next_.literal_buffer = NULL;
+ next_.literal_chars = Vector<const char>();
Token::Value token;
has_line_terminator_before_next_ = false;
do {
@@ -657,7 +665,7 @@ void Scanner::ScanJson() {
Token::Value Scanner::ScanJsonString() {
ASSERT_EQ('"', c0_);
Advance();
- StartLiteral();
+ LiteralScope literal(this);
while (c0_ != '"' && c0_ > 0) {
// Check for control character (0x00-0x1f) or unterminated string (<0).
if (c0_ < 0x20) return Token::ILLEGAL;
@@ -691,7 +699,9 @@ Token::Value Scanner::ScanJsonString() {
for (int i = 0; i < 4; i++) {
Advance();
int digit = HexValue(c0_);
- if (digit < 0) return Token::ILLEGAL;
+ if (digit < 0) {
+ return Token::ILLEGAL;
+ }
value = value * 16 + digit;
}
AddChar(value);
@@ -706,14 +716,14 @@ Token::Value Scanner::ScanJsonString() {
if (c0_ != '"') {
return Token::ILLEGAL;
}
- TerminateLiteral();
+ literal.Complete();
Advance();
return Token::STRING;
}
Token::Value Scanner::ScanJsonNumber() {
- StartLiteral();
+ LiteralScope literal(this);
if (c0_ == '-') AddCharAdvance();
if (c0_ == '0') {
AddCharAdvance();
@@ -733,7 +743,7 @@ Token::Value Scanner::ScanJsonNumber() {
AddCharAdvance();
} while (c0_ >= '0' && c0_ <= '9');
}
- if ((c0_ | 0x20) == 'e') {
+ if (AsciiAlphaToLower(c0_) == 'e') {
AddCharAdvance();
if (c0_ == '-' || c0_ == '+') AddCharAdvance();
if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
@@ -741,29 +751,28 @@ Token::Value Scanner::ScanJsonNumber() {
AddCharAdvance();
} while (c0_ >= '0' && c0_ <= '9');
}
- TerminateLiteral();
+ literal.Complete();
return Token::NUMBER;
}
Token::Value Scanner::ScanJsonIdentifier(const char* text,
Token::Value token) {
- StartLiteral();
+ LiteralScope literal(this);
while (*text != '\0') {
if (c0_ != *text) return Token::ILLEGAL;
Advance();
text++;
}
if (kIsIdentifierPart.get(c0_)) return Token::ILLEGAL;
- TerminateLiteral();
+ literal.Complete();
return token;
}
void Scanner::ScanJavaScript() {
- next_.literal_buffer = NULL;
+ next_.literal_chars = Vector<const char>();
Token::Value token;
- has_line_terminator_before_next_ = false;
do {
// Remember the position of the next token
next_.location.beg_pos = source_pos();
@@ -1004,6 +1013,10 @@ void Scanner::ScanJavaScript() {
void Scanner::SeekForward(int pos) {
source_->SeekForward(pos - 1);
Advance();
+ // This function is only called to seek to the location
+ // of the end of a function (at the "}" token). It doesn't matter
+ // whether there was a line terminator in the part we skip.
+ has_line_terminator_before_next_ = false;
Scan();
}
@@ -1098,7 +1111,7 @@ Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
- StartLiteral();
+ LiteralScope literal(this);
while (c0_ != quote && c0_ >= 0 && !kIsLineTerminator.get(c0_)) {
uc32 c = c0_;
Advance();
@@ -1109,10 +1122,8 @@ Token::Value Scanner::ScanString() {
AddChar(c);
}
}
- if (c0_ != quote) {
- return Token::ILLEGAL;
- }
- TerminateLiteral();
+ if (c0_ != quote) return Token::ILLEGAL;
+ literal.Complete();
Advance(); // consume quote
return Token::STRING;
@@ -1148,7 +1159,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
- StartLiteral();
+ LiteralScope literal(this);
if (seen_period) {
// we have already seen a decimal point of the float
AddChar('.');
@@ -1164,12 +1175,13 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// hex number
kind = HEX;
AddCharAdvance();
- if (!IsHexDigit(c0_))
+ if (!IsHexDigit(c0_)) {
// we must have at least one hex digit after 'x'/'X'
return Token::ILLEGAL;
- while (IsHexDigit(c0_))
+ }
+ while (IsHexDigit(c0_)) {
AddCharAdvance();
-
+ }
} else if ('0' <= c0_ && c0_ <= '7') {
// (possible) octal number
kind = OCTAL;
@@ -1202,12 +1214,12 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
AddCharAdvance();
if (c0_ == '+' || c0_ == '-')
AddCharAdvance();
- if (!IsDecimalDigit(c0_))
+ if (!IsDecimalDigit(c0_)) {
// we must have at least one decimal digit after 'e'/'E'
return Token::ILLEGAL;
+ }
ScanDecimalDigits();
}
- TerminateLiteral();
// The source character immediately following a numeric literal must
// not be an identifier start or a decimal digit; see ECMA-262
@@ -1216,6 +1228,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (IsDecimalDigit(c0_) || kIsIdentifierStart.get(c0_))
return Token::ILLEGAL;
+ literal.Complete();
+
return Token::NUMBER;
}
@@ -1235,7 +1249,7 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
Token::Value Scanner::ScanIdentifier() {
ASSERT(kIsIdentifierStart.get(c0_));
- StartLiteral();
+ LiteralScope literal(this);
KeywordMatcher keyword_match;
// Scan identifier start character.
@@ -1265,7 +1279,7 @@ Token::Value Scanner::ScanIdentifier() {
Advance();
}
}
- TerminateLiteral();
+ literal.Complete();
return keyword_match.token();
}
@@ -1295,36 +1309,32 @@ bool Scanner::ScanRegExpPattern(bool seen_equal) {
// Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
// the scanner should pass uninterpreted bodies to the RegExp
// constructor.
- StartLiteral();
+ LiteralScope literal(this);
if (seen_equal)
AddChar('=');
while (c0_ != '/' || in_character_class) {
- if (kIsLineTerminator.get(c0_) || c0_ < 0)
- return false;
+ if (kIsLineTerminator.get(c0_) || c0_ < 0) return false;
if (c0_ == '\\') { // escaped character
AddCharAdvance();
- if (kIsLineTerminator.get(c0_) || c0_ < 0)
- return false;
+ if (kIsLineTerminator.get(c0_) || c0_ < 0) return false;
AddCharAdvance();
} else { // unescaped character
- if (c0_ == '[')
- in_character_class = true;
- if (c0_ == ']')
- in_character_class = false;
+ if (c0_ == '[') in_character_class = true;
+ if (c0_ == ']') in_character_class = false;
AddCharAdvance();
}
}
Advance(); // consume '/'
- TerminateLiteral();
+ literal.Complete();
return true;
}
bool Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
- StartLiteral();
+ LiteralScope literal(this);
while (kIsIdentifierPart.get(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
@@ -1337,7 +1347,7 @@ bool Scanner::ScanRegExpFlags() {
}
AddCharAdvance();
}
- TerminateLiteral();
+ literal.Complete();
next_.location.end_pos = source_pos() - 1;
return true;
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 2dce5a18e0..8d6184697f 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -40,46 +40,45 @@ class UTF8Buffer {
UTF8Buffer();
~UTF8Buffer();
- void AddChar(uc32 c) {
- ASSERT_NOT_NULL(data_);
- if (cursor_ <= limit_ &&
- static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
- *cursor_++ = static_cast<char>(c);
+ inline void AddChar(uc32 c) {
+ if (static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
+ buffer_.Add(static_cast<char>(c));
} else {
AddCharSlow(c);
}
}
- void Reset() {
- if (data_ == NULL) {
- data_ = NewArray<char>(kInitialCapacity);
- limit_ = ComputeLimit(data_, kInitialCapacity);
- }
- cursor_ = data_;
+ void StartLiteral() {
+ buffer_.StartSequence();
}
- int pos() const {
- ASSERT_NOT_NULL(data_);
- return static_cast<int>(cursor_ - data_);
+ Vector<const char> EndLiteral() {
+ buffer_.Add(kEndMarker);
+ Vector<char> sequence = buffer_.EndSequence();
+ return Vector<const char>(sequence.start(), sequence.length());
}
- char* data() const { return data_; }
-
- private:
- static const int kInitialCapacity = 256;
- char* data_;
- char* cursor_;
- char* limit_;
-
- int Capacity() const {
- ASSERT_NOT_NULL(data_);
- return static_cast<int>(limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
+ void DropLiteral() {
+ buffer_.DropSequence();
}
- static char* ComputeLimit(char* data, int capacity) {
- return (data + capacity) - unibrow::Utf8::kMaxEncodedSize;
+ void Reset() {
+ buffer_.Reset();
}
+ // The end marker added after a parsed literal.
+ // Using zero allows the use of strlen and similar functions on
+ // identifiers and numbers (but not strings, since they may contain zero
+ // bytes).
+ // TODO(lrn): Use '\xff' as end marker, since it cannot occur inside
+ // a UTF-8 string. This requires changes in all places that use
+ // str-functions on the literals, but allows a single pointer to represent
+ // the literal, even if it contains embedded zeros.
+ static const char kEndMarker = '\x00';
+ private:
+ static const int kInitialCapacity = 256;
+ SequenceCollector<char, 4> buffer_;
+
void AddCharSlow(uc32 c);
};
@@ -271,6 +270,17 @@ class Scanner {
public:
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+ class LiteralScope {
+ public:
+ explicit LiteralScope(Scanner* self);
+ ~LiteralScope();
+ void Complete();
+
+ private:
+ Scanner* scanner_;
+ bool complete_;
+ };
+
// Construction
explicit Scanner(ParserMode parse_mode);
@@ -314,27 +324,34 @@ class Scanner {
// These functions only give the correct result if the literal
// was scanned between calls to StartLiteral() and TerminateLiteral().
const char* literal_string() const {
- return current_.literal_buffer->data();
+ return current_.literal_chars.start();
}
+
int literal_length() const {
- // Excluding terminal '\0' added by TerminateLiteral().
- return current_.literal_buffer->pos() - 1;
+ // Excluding terminal '\x00' added by TerminateLiteral().
+ return current_.literal_chars.length() - 1;
+ }
+
+ Vector<const char> literal() const {
+ return Vector<const char>(literal_string(), literal_length());
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
const char* next_literal_string() const {
- return next_.literal_buffer->data();
+ return next_.literal_chars.start();
}
+
+
// Returns the length of the next token (that would be returned if
// Next() were called).
int next_literal_length() const {
- return next_.literal_buffer->pos() - 1;
+ // Excluding terminal '\x00' added by TerminateLiteral().
+ return next_.literal_chars.length() - 1;
}
Vector<const char> next_literal() const {
- return Vector<const char>(next_literal_string(),
- next_literal_length());
+ return Vector<const char>(next_literal_string(), next_literal_length());
}
// Scans the input as a regular expression pattern, previous
@@ -371,7 +388,7 @@ class Scanner {
struct TokenDesc {
Token::Value token;
Location location;
- UTF8Buffer* literal_buffer;
+ Vector<const char> literal_chars;
};
void Init(Handle<String> source,
@@ -380,10 +397,12 @@ class Scanner {
ParserLanguage language);
// Literal buffer support
- void StartLiteral();
- void AddChar(uc32 ch);
- void AddCharAdvance();
- void TerminateLiteral();
+ inline void StartLiteral();
+ inline void AddChar(uc32 ch);
+ inline void AddCharAdvance();
+ inline void TerminateLiteral();
+ // Stops scanning of a literal, e.g., due to an encountered error.
+ inline void DropLiteral();
// Low-level scanning support.
void Advance() { c0_ = source_->Advance(); }
@@ -487,9 +506,8 @@ class Scanner {
SafeStringInputBuffer safe_string_input_buffer_;
// Buffer to hold literal values (identifiers, strings, numbers)
- // using 0-terminated UTF-8 encoding.
- UTF8Buffer literal_buffer_1_;
- UTF8Buffer literal_buffer_2_;
+ // using '\x00'-terminated UTF-8 encoding. Handles allocation internally.
+ UTF8Buffer literal_buffer_;
bool stack_overflow_;
static StaticResource<Utf8Decoder> utf8_decoder_;
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 0057d18f1d..cde7577c7a 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -241,16 +241,6 @@ void ExternalReferenceTable::PopulateTable() {
DEBUG_ADDRESS,
Debug::k_restarter_frame_function_pointer << kDebugIdShift,
"Debug::restarter_frame_function_pointer_address()");
- const char* debug_register_format = "Debug::register_address(%i)";
- int dr_format_length = StrLength(debug_register_format);
- for (int i = 0; i < kNumJSCallerSaved; ++i) {
- Vector<char> name = Vector<char>::New(dr_format_length + 1);
- OS::SNPrintF(name, debug_register_format, i);
- Add(Debug_Address(Debug::k_register_address, i).address(),
- DEBUG_ADDRESS,
- Debug::k_register_address << kDebugIdShift | i,
- name.start());
- }
#endif
// Stat counters
@@ -831,6 +821,12 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, kLargeFixedArray) \
CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
+#define ONE_PER_CODE_SPACE(where, how, within) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, kLargeCode) \
+ CASE_BODY(where, how, within, LO_SPACE, kUnknownOffsetFromStart)
+
#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
space_number, \
offset_from_start) \
@@ -862,6 +858,8 @@ void Deserializer::ReadChunk(Object** current,
// Deserialize a new object and write a pointer to it to the current
// object.
ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
+ // Support for direct instruction pointers in functions
+ ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
@@ -870,11 +868,14 @@ void Deserializer::ReadChunk(Object** current,
ALL_SPACES(kBackref, kPlain, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to its first instruction
- // to the current code object.
+ // to the current code object or the instruction pointer in a function
+ // object.
ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
+ ALL_SPACES(kBackref, kPlain, kFirstInstruction)
// Find an already deserialized object using its offset from the start
// and write a pointer to it to the current object.
ALL_SPACES(kFromStart, kPlain, kStartOfObject)
+ ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
// Find an already deserialized code object using its offset from the
// start and write a pointer to its first instruction to the current code
// object.
@@ -894,6 +895,14 @@ void Deserializer::ReadChunk(Object** current,
kStartOfObject,
0,
kUnknownOffsetFromStart)
+ // Find a code entry in the partial snapshot cache and
+ // write a pointer to it to the current object.
+ CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
+ CASE_BODY(kPartialSnapshotCache,
+ kPlain,
+ kFirstInstruction,
+ 0,
+ kUnknownOffsetFromStart)
// Find an external reference and write a pointer to it to the current
// object.
CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
@@ -1336,6 +1345,14 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
}
+void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
+ Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ OutputRawData(entry_address);
+ serializer_->SerializeObject(target, kPlain, kFirstInstruction);
+ bytes_processed_so_far_ += kPointerSize;
+}
+
+
void Serializer::ObjectSerializer::VisitExternalAsciiString(
v8::String::ExternalAsciiStringResource** resource_pointer) {
Address references_start = reinterpret_cast<Address>(resource_pointer);
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index d1b668d13b..92a514975c 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -448,6 +448,7 @@ class Serializer : public SerializerDeserializer {
void VisitPointers(Object** start, Object** end);
void VisitExternalReferences(Address* start, Address* end);
void VisitCodeTarget(RelocInfo* target);
+ void VisitCodeEntry(Address entry_address);
void VisitRuntimeEntry(RelocInfo* reloc);
// Used for serializing the external strings that hold the natives source.
void VisitExternalAsciiString(
@@ -536,7 +537,8 @@ class PartialSerializer : public Serializer {
// would cause dupes.
ASSERT(!o->IsScript());
return o->IsString() || o->IsSharedFunctionInfo() ||
- o->IsHeapNumber() || o->IsCode();
+ o->IsHeapNumber() || o->IsCode() ||
+ o->map() == Heap::fixed_cow_array_map();
}
private:
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index d49c207518..fbb26732e5 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -220,21 +220,22 @@ void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
- watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
+ watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}
bool Page::IsWatermarkValid() {
- return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
+ return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
}
void Page::InvalidateWatermark(bool value) {
if (value) {
- flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
+ flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ watermark_invalidated_mark_;
} else {
- flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
- (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
+ flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
}
ASSERT(IsWatermarkValid() == !value);
@@ -242,15 +243,15 @@ void Page::InvalidateWatermark(bool value) {
bool Page::GetPageFlag(PageFlag flag) {
- return (flags_ & flag) != 0;
+ return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
- flags_ |= flag;
+ flags_ |= static_cast<intptr_t>(1 << flag);
} else {
- flags_ &= ~flag;
+ flags_ &= ~static_cast<intptr_t>(1 << flag);
}
}
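
The flag accessors change because PageFlag enumerators are now bit positions rather than ready-made masks; every use must shift first, and mixing the two conventions fails silently (an enumerator used directly as a mask tests the wrong bit, and position 0 tests nothing). A compact sketch of the position-based convention:

    #include <cstdint>
    #include <iostream>

    // Enumerators are bit positions, as in the hunk above, so every mask is
    // formed with (1 << flag); using the enumerator directly as a mask would
    // test the wrong bits, and position 0 would test nothing at all.
    enum PageFlag { IS_NORMAL_PAGE = 0, WAS_IN_USE_BEFORE_MC = 1,
                    IS_EXECUTABLE = 2 };

    bool GetFlag(intptr_t flags, PageFlag flag) {
      return (flags & static_cast<intptr_t>(1 << flag)) != 0;
    }

    intptr_t SetFlag(intptr_t flags, PageFlag flag, bool value) {
      if (value) return flags | static_cast<intptr_t>(1 << flag);
      return flags & ~static_cast<intptr_t>(1 << flag);
    }

    int main() {
      intptr_t flags = 0;
      flags = SetFlag(flags, IS_EXECUTABLE, true);
      std::cout << GetFlag(flags, IS_EXECUTABLE)            // 1
                << GetFlag(flags, IS_NORMAL_PAGE) << "\n";  // 0
      // Wrong form, for comparison: (flags & IS_NORMAL_PAGE) is always 0,
      // and (flags & IS_EXECUTABLE) tests bit 1, not bit 2.
    }
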
@@ -289,10 +290,27 @@ void Page::SetIsLargeObjectPage(bool is_large_object_page) {
SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}
+bool Page::IsPageExecutable() {
+ return GetPageFlag(IS_EXECUTABLE);
+}
+
+
+void Page::SetIsPageExecutable(bool is_page_executable) {
+ SetPageFlag(IS_EXECUTABLE, is_page_executable);
+}
+
// -----------------------------------------------------------------------------
// MemoryAllocator
+void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
+ address_ = a;
+ size_ = s;
+ owner_ = o;
+ executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
+}
+
+
bool MemoryAllocator::IsValidChunk(int chunk_id) {
if (!IsValidChunkId(chunk_id)) return false;
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 2bb58b817c..3d2d42f098 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -41,7 +41,7 @@ namespace internal {
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
+intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -68,6 +68,12 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
}
+HeapObjectIterator::HeapObjectIterator(Page* page,
+ HeapObjectCallback size_func) {
+ Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+}
+
+
void HeapObjectIterator::Initialize(Address cur, Address end,
HeapObjectCallback size_f) {
cur_addr_ = cur;
@@ -266,6 +272,10 @@ void CodeRange::TearDown() {
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;
+int MemoryAllocator::size_executable_ = 0;
+
+List<MemoryAllocator::MemoryAllocationCallbackRegistration>
+ MemoryAllocator::memory_allocation_callbacks_;
VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
@@ -308,6 +318,7 @@ bool MemoryAllocator::Setup(int capacity) {
if (max_nof_chunks_ > kMaxNofChunks) return false;
size_ = 0;
+ size_executable_ = 0;
ChunkInfo info; // uninitialized element.
for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
chunks_.Add(info);
@@ -353,6 +364,8 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
}
int alloced = static_cast<int>(*allocated);
size_ += alloced;
+
+ if (executable == EXECUTABLE) size_executable_ += alloced;
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
@@ -361,7 +374,9 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
}
-void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+void MemoryAllocator::FreeRawMemory(void* mem,
+ size_t length,
+ Executability executable) {
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
@@ -372,10 +387,57 @@ void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
}
Counters::memory_allocated.Decrement(static_cast<int>(length));
size_ -= static_cast<int>(length);
+ if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+
ASSERT(size_ >= 0);
}
+void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size) {
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ MemoryAllocationCallbackRegistration registration =
+ memory_allocation_callbacks_[i];
+ if ((registration.space & space) == space &&
+ (registration.action & action) == action)
+ registration.callback(space, action, static_cast<int>(size));
+ }
+}
+
+
+bool MemoryAllocator::MemoryAllocationCallbackRegistered(
+ MemoryAllocationCallback callback) {
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ if (memory_allocation_callbacks_[i].callback == callback) return true;
+ }
+ return false;
+}
+
+
+void MemoryAllocator::AddMemoryAllocationCallback(
+ MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action) {
+ ASSERT(callback != NULL);
+ MemoryAllocationCallbackRegistration registration(callback, space, action);
+ ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
+ return memory_allocation_callbacks_.Add(registration);
+}
+
+
+void MemoryAllocator::RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback) {
+ ASSERT(callback != NULL);
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ if (memory_allocation_callbacks_[i].callback == callback) {
+ memory_allocation_callbacks_.Remove(i);
+ return;
+ }
+ }
+ UNREACHABLE();
+}
+
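
The new registration API is a small observer registry: each entry carries a callback plus space and action bitmasks, and PerformAllocationCallback fires every entry whose masks cover the event. A simplified, self-contained version of that dispatch (toy enums; the real ObjectSpace and AllocationAction values come from the public V8 API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Simplified observer registry in the shape of the MemoryAllocator
    // hunk: space and action are bit flags, and a registration fires only
    // when its masks cover the event's bits.
    enum Space  { kNewSpace = 1 << 0, kOldSpace = 1 << 1, kCodeSpace = 1 << 2 };
    enum Action { kAllocate = 1 << 0, kFree = 1 << 1 };

    using Callback = void (*)(int space, int action, std::size_t size);

    struct Registration { Callback callback; int space; int action; };
    static std::vector<Registration> registry;

    void AddCallback(Callback cb, int space, int action) {
      registry.push_back({cb, space, action});
    }

    void PerformCallback(int space, int action, std::size_t size) {
      for (const Registration& r : registry) {
        if ((r.space & space) == space && (r.action & action) == action)
          r.callback(space, action, size);
      }
    }

    void LogCode(int, int action, std::size_t size) {
      std::cout << (action == kAllocate ? "+" : "-") << size << "\n";
    }

    int main() {
      AddCallback(LogCode, kCodeSpace, kAllocate | kFree);
      PerformCallback(kCodeSpace, kAllocate, 4096);  // fires: prints +4096
      PerformCallback(kOldSpace, kAllocate, 4096);   // filtered out
    }
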
void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
ASSERT(initial_chunk_ == NULL);
@@ -425,7 +487,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
if (*allocated_pages == 0) {
- FreeRawMemory(chunk, chunk_size);
+ FreeRawMemory(chunk, chunk_size, owner->executable());
LOG(DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
@@ -433,6 +495,8 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
int chunk_id = Pop();
chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
+ ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+ PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}
@@ -591,7 +655,10 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
} else {
LOG(DeleteEvent("PagedChunk", c.address()));
- FreeRawMemory(c.address(), c.size());
+ ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
+ size_t size = c.size();
+ FreeRawMemory(c.address(), size, c.executable());
+ PerformAllocationCallback(space, kAllocationActionFree, size);
}
c.init(NULL, 0, NULL);
Push(chunk_id);
@@ -1977,77 +2044,88 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
}
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
- if (will_compact) {
- // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
- // to skip unused pages. Update flag value for all pages in space.
- PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
- Page* last_in_use = AllocationTopPage();
- bool in_use = true;
-
- while (all_pages_iterator.has_next()) {
- Page* p = all_pages_iterator.next();
- p->SetWasInUseBeforeMC(in_use);
- if (p == last_in_use) {
- // We passed a page containing allocation top. All consequent
- // pages are not used.
- in_use = false;
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+ const bool add_to_freelist = true;
+
+ // Mark used and unused pages to properly fill unused pages
+ // after reordering.
+ PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+ Page* last_in_use = AllocationTopPage();
+ bool in_use = true;
+
+ while (all_pages_iterator.has_next()) {
+ Page* p = all_pages_iterator.next();
+ p->SetWasInUseBeforeMC(in_use);
+ if (p == last_in_use) {
+ // We passed a page containing allocation top. All subsequent
+ // pages are not used.
+ in_use = false;
+ }
+ }
+
+ if (page_list_is_chunk_ordered_) return;
+
+ Page* new_last_in_use = Page::FromAddress(NULL);
+ MemoryAllocator::RelinkPageListInChunkOrder(this,
+ &first_page_,
+ &last_page_,
+ &new_last_in_use);
+ ASSERT(new_last_in_use->is_valid());
+
+ if (new_last_in_use != last_in_use) {
+ // Current allocation top points to a page which is now in the middle
+ // of page list. We should move allocation top forward to the new last
+ // used page so various object iterators will continue to work properly.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+ last_in_use->AllocationTop());
+
+ last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+ if (size_in_bytes > 0) {
+ Address start = last_in_use->AllocationTop();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ Heap::CreateFillerObjectAt(start, size_in_bytes);
}
}
- if (!page_list_is_chunk_ordered_) {
- Page* new_last_in_use = Page::FromAddress(NULL);
- MemoryAllocator::RelinkPageListInChunkOrder(this,
- &first_page_,
- &last_page_,
- &new_last_in_use);
- ASSERT(new_last_in_use->is_valid());
-
- if (new_last_in_use != last_in_use) {
- // Current allocation top points to a page which is now in the middle
- // of page list. We should move allocation top forward to the new last
- // used page so various object iterators will continue to work properly.
- last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-
- int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
- last_in_use->AllocationTop());
-
- if (size_in_bytes > 0) {
- // There is still some space left on this page. Create a fake
- // object which will occupy all free space on this page.
- // Otherwise iterators would not be able to scan this page
- // correctly.
-
- Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
- size_in_bytes);
- }
+ // The new last-in-use page was in the middle of the list before
+ // sorting, so it is full.
+ SetTop(new_last_in_use->AllocationTop());
- // New last in use page was in the middle of the list before
- // sorting so it full.
- SetTop(new_last_in_use->AllocationTop());
+ ASSERT(AllocationTopPage() == new_last_in_use);
+ ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+ }
- ASSERT(AllocationTopPage() == new_last_in_use);
- ASSERT(AllocationTopPage()->WasInUseBeforeMC());
- }
+ PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+ while (pages_in_use_iterator.has_next()) {
+ Page* p = pages_in_use_iterator.next();
+ if (!p->WasInUseBeforeMC()) {
+ // Empty page is in the middle of a sequence of used pages.
+ // Allocate it as a whole and deallocate immediately.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+ p->ObjectAreaStart());
- PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
- while (pages_in_use_iterator.has_next()) {
- Page* p = pages_in_use_iterator.next();
- if (!p->WasInUseBeforeMC()) {
- // Empty page is in the middle of a sequence of used pages.
- // Create a fake object which will occupy all free space on this page.
- // Otherwise iterators would not be able to scan this page correctly.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
- p->ObjectAreaStart());
-
- p->SetAllocationWatermark(p->ObjectAreaStart());
- Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
- }
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ Address start = p->ObjectAreaStart();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ Heap::CreateFillerObjectAt(start, size_in_bytes);
}
-
- page_list_is_chunk_ordered_ = true;
}
}
+
+ page_list_is_chunk_ordered_ = true;
+}
+
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ RelinkPageListInChunkOrder(false);
+ }
}
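
The first loop in RelinkPageListInChunkOrder above is a single forward walk: every page up to and including the allocation-top page is marked as previously in use, everything after it as unused. A minimal standalone sketch of that walk, using simplified types rather than V8's actual Page class:

#include <cstddef>
#include <vector>

struct Page {
  bool was_in_use_before_mc = false;
};

// 'pages' is the page list in its current order; 'top_page' is the index
// of the page holding the allocation top. Pages past it are unused.
void MarkUsedPages(std::vector<Page>* pages, std::size_t top_page) {
  bool in_use = true;
  for (std::size_t i = 0; i < pages->size(); i++) {
    (*pages)[i].was_in_use_before_mc = in_use;
    if (i == top_page) in_use = false;  // everything after top is unused
  }
}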
@@ -2182,6 +2260,13 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
}
+void OldSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ Free(start, size_in_bytes, add_to_freelist);
+}
+
+
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
@@ -2456,6 +2541,21 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
}
+void FixedSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ // Free-list elements in fixed space are assumed to have a fixed size.
+ // We break the free block into chunks and add them to the free list
+ // individually.
+ int size = object_size_in_bytes();
+ ASSERT(size_in_bytes % size == 0);
+ Address end = start + size_in_bytes;
+ for (Address a = start; a < end; a += size) {
+ Free(a, add_to_freelist);
+ }
+}
+
+
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
int pct = Available() * 100 / Capacity();
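
FixedSpace::DeallocateBlock above splits a freed block into equally sized cells and frees each cell on its own, because the fixed-space free list only holds objects of one size. A reduced sketch of the same loop; FreeCell is a hypothetical stand-in for the space's per-cell Free:

#include <cassert>
#include <cstdint>

typedef uint8_t* Address;

void FreeCell(Address cell) {
  (void)cell;  // stand-in for the per-cell free-list insertion
}

void DeallocateFixedBlock(Address start, int size_in_bytes, int cell_size) {
  assert(size_in_bytes % cell_size == 0);  // block must be whole cells
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += cell_size) {
    FreeCell(a);  // each cell becomes one free-list element
  }
}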
@@ -2552,10 +2652,15 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
if (mem == NULL) return NULL;
LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
if (*chunk_size < requested) {
- MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+ MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
LOG(DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
+ ObjectSpace space =
+ (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+ MemoryAllocator::PerformAllocationCallback(space,
+ kAllocationActionAllocate,
+ *chunk_size);
return reinterpret_cast<LargeObjectChunk*>(mem);
}
@@ -2590,7 +2695,15 @@ void LargeObjectSpace::TearDown() {
LargeObjectChunk* chunk = first_chunk_;
first_chunk_ = first_chunk_->next();
LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
- MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+ Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+ Executability executable =
+ page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
+ ObjectSpace space = kObjectSpaceLoSpace;
+ if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
+ size_t size = chunk->size();
+ MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
+ MemoryAllocator::PerformAllocationCallback(
+ space, kAllocationActionFree, size);
}
size_ = 0;
@@ -2654,6 +2767,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->SetIsLargeObjectPage(true);
+ page->SetIsPageExecutable(executable);
page->SetRegionMarks(Page::kAllRegionsCleanMarks);
return HeapObject::FromAddress(object_address);
}
@@ -2696,6 +2810,22 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
+
+LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+ // TODO(853): Change this implementation to only find executable
+ // chunks and use some kind of hash-based approach to speed it up.
+ for (LargeObjectChunk* chunk = first_chunk_;
+ chunk != NULL;
+ chunk = chunk->next()) {
+ Address chunk_address = chunk->address();
+ if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
+ return chunk;
+ }
+ }
+ return NULL;
+}
+
+
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
@@ -2768,6 +2898,10 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
previous = current;
current = current->next();
} else {
+ Page* page = Page::FromAddress(RoundUp(current->address(),
+ Page::kPageSize));
+ Executability executable =
+ page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Address chunk_address = current->address();
size_t chunk_size = current->size();
@@ -2783,7 +2917,11 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
MarkCompactCollector::ReportDeleteIfNeeded(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
- MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+ ObjectSpace space = kObjectSpaceLoSpace;
+ if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
+ MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
+ MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
+ size_);
LOG(DeleteEvent("LargeObjectChunk", chunk_address));
}
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 051ce37cf8..9ffa940489 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -197,6 +197,10 @@ class Page {
inline void SetIsLargeObjectPage(bool is_large_object_page);
+ inline bool IsPageExecutable();
+
+ inline void SetIsPageExecutable(bool is_page_executable);
+
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
@@ -256,13 +260,16 @@ class Page {
STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
enum PageFlag {
- IS_NORMAL_PAGE = 1 << 0,
- WAS_IN_USE_BEFORE_MC = 1 << 1,
+ IS_NORMAL_PAGE = 0,
+ WAS_IN_USE_BEFORE_MC,
// Page allocation watermark was bumped by preallocation during scavenge.
// Correct watermark can be retrieved by CachedAllocationWatermark() method
- WATERMARK_INVALIDATED = 1 << 2
+ WATERMARK_INVALIDATED,
+ IS_EXECUTABLE,
+ NUM_PAGE_FLAGS // Must be last
};
+ static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
// To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
// scavenge we just invalidate the watermark on each old space page after
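
Note the encoding change in this hunk: PageFlag values are now bit indices rather than precomputed masks, so a flag is tested with 1 << flag and kPageFlagMask covers all flag bits at the bottom of the word. A standalone model of the new scheme:

#include <cstdint>

enum PageFlag {
  IS_NORMAL_PAGE = 0,
  WAS_IN_USE_BEFORE_MC,
  WATERMARK_INVALIDATED,
  IS_EXECUTABLE,
  NUM_PAGE_FLAGS  // must be last
};

const uint32_t kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;  // 0xF here

inline bool GetPageFlag(uint32_t flags, PageFlag flag) {
  return (flags & (1u << flag)) != 0;
}

inline uint32_t SetPageFlag(uint32_t flags, PageFlag flag, bool value) {
  return value ? (flags | (1u << flag)) : (flags & ~(1u << flag));
}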
@@ -291,7 +298,7 @@ class Page {
inline void ClearGCFields();
- static const int kAllocationWatermarkOffsetShift = 3;
+ static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
static const uint32_t kAllocationWatermarkOffsetMask =
((1 << kAllocationWatermarkOffsetBits) - 1) <<
@@ -557,7 +564,20 @@ class MemoryAllocator : public AllStatic {
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable);
- static void FreeRawMemory(void* buf, size_t length);
+ static void FreeRawMemory(void* buf,
+ size_t length,
+ Executability executable);
+ static void PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size);
+
+ static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+ static void RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback);
+ static bool MemoryAllocationCallbackRegistered(
+ MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
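
These MemoryAllocator hooks back the embedder-facing callback registration added to include/v8.h in this commit. Assuming the public API matches later 2.x releases (V8::AddMemoryAllocationCallback with space/action filters and an int size parameter), an embedder would observe raw-memory traffic roughly like this:

#include <cstdio>
#include <v8.h>

// Invoked for allocate/free events on the spaces selected at registration.
static void OnV8Memory(v8::ObjectSpace space,
                       v8::AllocationAction action,
                       int size) {
  std::printf("v8 memory event: space=%d action=%d size=%d\n",
              static_cast<int>(space), static_cast<int>(action), size);
}

void InstallMemoryObserver() {
  v8::V8::AddMemoryAllocationCallback(OnV8Memory,
                                      v8::kObjectSpaceAll,
                                      v8::kAllocationActionAll);
}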
@@ -565,6 +585,9 @@ class MemoryAllocator : public AllStatic {
// Returns allocated spaces in bytes.
static int Size() { return size_; }
+ // Returns allocated executable spaces in bytes.
+ static int SizeExecutable() { return size_executable_; }
+
// Returns maximum available bytes that the old space can have.
static int MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
@@ -628,6 +651,22 @@ class MemoryAllocator : public AllStatic {
// Allocated space size in bytes.
static int size_;
+ // Allocated executable space size in bytes.
+ static int size_executable_;
+
+ struct MemoryAllocationCallbackRegistration {
+ MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action)
+ : callback(callback), space(space), action(action) {
+ }
+ MemoryAllocationCallback callback;
+ ObjectSpace space;
+ AllocationAction action;
+ };
+  // A list of callbacks that are triggered when memory is allocated or freed.
+  static List<MemoryAllocationCallbackRegistration>
+ memory_allocation_callbacks_;
// The initial chunk of virtual memory.
static VirtualMemory* initial_chunk_;
@@ -635,20 +674,23 @@ class MemoryAllocator : public AllStatic {
// Allocated chunk info: chunk start address, chunk size, and owning space.
class ChunkInfo BASE_EMBEDDED {
public:
- ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
- void init(Address a, size_t s, PagedSpace* o) {
- address_ = a;
- size_ = s;
- owner_ = o;
- }
+ ChunkInfo() : address_(NULL),
+ size_(0),
+ owner_(NULL),
+ executable_(NOT_EXECUTABLE) {}
+ inline void init(Address a, size_t s, PagedSpace* o);
Address address() { return address_; }
size_t size() { return size_; }
PagedSpace* owner() { return owner_; }
+    // We save the executability of the owner to allow using it
+    // when collecting stats after the owner has been destroyed.
+ Executability executable() const { return executable_; }
private:
Address address_;
size_t size_;
PagedSpace* owner_;
+ Executability executable_;
};
// Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
@@ -742,6 +784,7 @@ class HeapObjectIterator: public ObjectIterator {
HeapObjectIterator(PagedSpace* space,
Address start,
HeapObjectCallback size_func);
+ HeapObjectIterator(Page* page, HeapObjectCallback size_func);
inline HeapObject* next() {
return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
@@ -1025,6 +1068,11 @@ class PagedSpace : public Space {
// Freed pages are moved to the end of page list.
void FreePages(Page* prev, Page* last);
+ // Deallocates a block.
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) = 0;
+
// Set space allocation info.
void SetTop(Address top) {
allocation_info_.top = top;
@@ -1083,6 +1131,8 @@ class PagedSpace : public Space {
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+ void RelinkPageListInChunkOrder(bool deallocate_blocks);
+
protected:
// Maximum capacity of this space.
int max_capacity_;
@@ -1800,6 +1850,10 @@ class OldSpace : public PagedSpace {
}
}
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
// Prepare for full garbage collection. Resets the relocation pointer and
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -1874,6 +1928,9 @@ class FixedSpace : public PagedSpace {
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
@@ -2058,7 +2115,7 @@ class LargeObjectChunk {
LargeObjectChunk* next() { return next_; }
void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
- size_t size() { return size_; }
+ size_t size() { return size_ & ~Page::kPageFlagMask; }
void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
// Returns the object in this chunk.
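
The size() accessor above now strips Page::kPageFlagMask, treating the low bits of the stored word as reserved for page flags (chunk sizes are page-aligned, so those bits are otherwise zero). A toy version of that pack/unpack pattern, illustrative rather than the exact V8 layout:

#include <cstdint>

const uintptr_t kNumFlagBits = 4;  // NUM_PAGE_FLAGS in this patch
const uintptr_t kFlagMask = (1 << kNumFlagBits) - 1;

uintptr_t PackSizeAndFlags(uintptr_t size, uintptr_t flags) {
  // 'size' is page-aligned, so its low bits are free to hold flags.
  return (size & ~kFlagMask) | (flags & kFlagMask);
}

uintptr_t UnpackSize(uintptr_t word) { return word & ~kFlagMask; }
uintptr_t UnpackFlags(uintptr_t word) { return word & kFlagMask; }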
@@ -2123,6 +2180,11 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
+  // Finds the large object chunk containing the given pc; returns NULL
+  // if no such chunk exists.
+ LargeObjectChunk* FindChunkContainingPc(Address pc);
+
+
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 54d93845d3..7a490d3ec6 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -119,7 +119,7 @@ Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
Object* result =
- receiver->map()->UpdateCodeCache(cache_name, Code::cast(code));
+ receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
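
Throughout this file the direct per-map cache update (map()->UpdateCodeCache) is replaced by an object-level hook, receiver->UpdateMapCodeCache. A simplified model of why the indirection matters: the object-level hook gets a chance to special-case objects whose maps are shared or normalized before the per-map cache is touched. Names and logic below are hypothetical, not V8's real implementation:

struct String;
struct Code;

struct Map {
  void UpdateCodeCache(String* name, Code* code) {
    // per-map code-cache insertion
  }
};

struct JSObject {
  Map* map_;
  bool has_shared_map_;  // e.g. a normalized map shared between objects

  Map* map() { return map_; }

  void UpdateMapCodeCache(String* name, Code* code) {
    if (has_shared_map_) {
      // A real implementation could first give this object a private map
      // (or otherwise avoid polluting the shared one) before caching.
    }
    map()->UpdateCodeCache(name, code);
  }
};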
@@ -131,15 +131,14 @@ Object* StubCache::ComputeLoadField(String* name,
JSObject* holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -152,15 +151,14 @@ Object* StubCache::ComputeLoadCallback(String* name,
AccessorInfo* callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -172,16 +170,15 @@ Object* StubCache::ComputeLoadConstant(String* name,
JSObject* holder,
Object* value) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -192,15 +189,14 @@ Object* StubCache::ComputeLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -218,9 +214,8 @@ Object* StubCache::ComputeLoadGlobal(String* name,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadGlobal(receiver,
@@ -230,7 +225,7 @@ Object* StubCache::ComputeLoadGlobal(String* name,
is_dont_delete);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -242,15 +237,14 @@ Object* StubCache::ComputeKeyedLoadField(String* name,
JSObject* holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -262,16 +256,15 @@ Object* StubCache::ComputeKeyedLoadConstant(String* name,
JSObject* holder,
Object* value) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -282,16 +275,15 @@ Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -303,16 +295,15 @@ Object* StubCache::ComputeKeyedLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -325,14 +316,13 @@ Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
ASSERT(receiver->IsJSObject());
- Map* map = receiver->map();
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -361,14 +351,13 @@ Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
JSFunction* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Map* map = receiver->map();
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -387,7 +376,7 @@ Object* StubCache::ComputeStoreField(String* name,
code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -409,7 +398,7 @@ Object* StubCache::ComputeStoreGlobal(String* name,
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -427,7 +416,7 @@ Object* StubCache::ComputeStoreCallback(String* name,
code = compiler.CompileStoreCallback(receiver, callback, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -444,7 +433,7 @@ Object* StubCache::ComputeStoreInterceptor(String* name,
code = compiler.CompileStoreInterceptor(receiver, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -462,7 +451,7 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(
Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -481,7 +470,7 @@ Object* StubCache::ComputeCallConstant(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
- Map* map = IC::GetCodeCacheMap(object, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
// Compute check type based on receiver/holder.
StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
@@ -499,7 +488,7 @@ Object* StubCache::ComputeCallConstant(int argc,
cache_holder,
in_loop,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
@@ -513,7 +502,7 @@ Object* StubCache::ComputeCallConstant(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -530,7 +519,7 @@ Object* StubCache::ComputeCallField(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
- Map* map = IC::GetCodeCacheMap(object, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -544,7 +533,7 @@ Object* StubCache::ComputeCallField(int argc,
cache_holder,
in_loop,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallField(JSObject::cast(object),
@@ -555,7 +544,7 @@ Object* StubCache::ComputeCallField(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -570,7 +559,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
- Map* map = IC::GetCodeCacheMap(object, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -585,7 +574,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
cache_holder,
NOT_IN_LOOP,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
@@ -595,7 +584,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -623,14 +612,14 @@ Object* StubCache::ComputeCallGlobal(int argc,
JSFunction* function) {
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(receiver, holder);
- Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
NORMAL,
cache_holder,
in_loop,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
@@ -643,7 +632,7 @@ Object* StubCache::ComputeCallGlobal(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 663201b9d0..bf14a4fe28 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -56,161 +56,169 @@ class StubCache : public AllStatic {
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
- static Object* ComputeLoadNonexistent(String* name, JSObject* receiver);
+ MUST_USE_RESULT static Object* ComputeLoadNonexistent(String* name,
+ JSObject* receiver);
- static Object* ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT static Object* ComputeLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- static Object* ComputeLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
+ MUST_USE_RESULT static Object* ComputeLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback);
- static Object* ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
+ MUST_USE_RESULT static Object* ComputeLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
- static Object* ComputeLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder);
+ MUST_USE_RESULT static Object* ComputeLoadInterceptor(String* name,
+ JSObject* receiver,
+ JSObject* holder);
- static Object* ComputeLoadNormal();
+ MUST_USE_RESULT static Object* ComputeLoadNormal();
- static Object* ComputeLoadGlobal(String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- bool is_dont_delete);
+ MUST_USE_RESULT static Object* ComputeLoadGlobal(String* name,
+ JSObject* receiver,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ bool is_dont_delete);
// ---
- static Object* ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- static Object* ComputeKeyedLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadCallback(
+ String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback);
- static Object* ComputeKeyedLoadConstant(String* name, JSObject* receiver,
- JSObject* holder, Object* value);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
- static Object* ComputeKeyedLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadInterceptor(String* name,
+ JSObject* receiver,
+ JSObject* holder);
- static Object* ComputeKeyedLoadArrayLength(String* name, JSArray* receiver);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadArrayLength(String* name,
+ JSArray* receiver);
- static Object* ComputeKeyedLoadStringLength(String* name,
- String* receiver);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadStringLength(String* name,
+ String* receiver);
- static Object* ComputeKeyedLoadFunctionPrototype(String* name,
- JSFunction* receiver);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadFunctionPrototype(
+ String* name,
+ JSFunction* receiver);
// ---
- static Object* ComputeStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition = NULL);
+ MUST_USE_RESULT static Object* ComputeStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition = NULL);
- static Object* ComputeStoreNormal();
+ MUST_USE_RESULT static Object* ComputeStoreNormal();
- static Object* ComputeStoreGlobal(String* name,
- GlobalObject* receiver,
- JSGlobalPropertyCell* cell);
+ MUST_USE_RESULT static Object* ComputeStoreGlobal(String* name,
+ GlobalObject* receiver,
+ JSGlobalPropertyCell* cell);
- static Object* ComputeStoreCallback(String* name,
- JSObject* receiver,
- AccessorInfo* callback);
+ MUST_USE_RESULT static Object* ComputeStoreCallback(String* name,
+ JSObject* receiver,
+ AccessorInfo* callback);
- static Object* ComputeStoreInterceptor(String* name, JSObject* receiver);
+ MUST_USE_RESULT static Object* ComputeStoreInterceptor(String* name,
+ JSObject* receiver);
// ---
- static Object* ComputeKeyedStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition = NULL);
+ MUST_USE_RESULT static Object* ComputeKeyedStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition = NULL);
// ---
- static Object* ComputeCallField(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- int index);
-
- static Object* ComputeCallConstant(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function);
-
- static Object* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver);
-
- static Object* ComputeCallInterceptor(int argc,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder);
-
- static Object* ComputeCallGlobal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function);
+ MUST_USE_RESULT static Object* ComputeCallField(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ int index);
+
+ MUST_USE_RESULT static Object* ComputeCallConstant(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ JSFunction* function);
+
+ MUST_USE_RESULT static Object* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ JSObject* receiver);
+
+ MUST_USE_RESULT static Object* ComputeCallInterceptor(int argc,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder);
+
+ MUST_USE_RESULT static Object* ComputeCallGlobal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ JSObject* receiver,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function);
// ---
- static Object* ComputeCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallPreMonomorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallPreMonomorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallMegamorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallMiss(int argc, Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallMiss(int argc, Code::Kind kind);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Code* FindCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* ComputeCallDebugBreak(int argc, Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallDebugBreak(int argc,
+ Code::Kind kind);
- static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallDebugPrepareStepIn(int argc,
+ Code::Kind kind);
#endif
-
// Update cache for entry hash(name, map).
static Code* Set(String* name, Map* map, Code* code);
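
Every Compute* result above may be a Failure object that the caller must handle, hence the blanket MUST_USE_RESULT annotation. V8 defines the macro via the compiler's unused-result attribute; the GCC/Clang spelling below is a stand-in, and the exact definition in this tree's globals.h is an assumption:

#if defined(__GNUC__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

struct Object;

MUST_USE_RESULT Object* ComputeSomething() { return nullptr; }

void Caller() {
  // ComputeSomething();  // would trigger -Wunused-result: the returned
  //                      // Object* may be a Failure that needs handling
  Object* result = ComputeSomething();
  (void)result;
}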
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 0d8960b8ee..ebc7fea1cd 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -248,6 +248,10 @@ class Token {
return op == INC || op == DEC;
}
+ static bool IsShiftOp(Value op) {
+ return (SHL <= op) && (op <= SHR);
+ }
+
// Returns a string corresponding to the JS token string
// (.e., "<" for the token LT) or NULL if the token doesn't
// have a (unique) string (e.g. an IDENTIFIER).
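
IsShiftOp above relies on SHL, SAR and SHR being declared consecutively in the token enum; the same pattern in isolation:

enum Value { ADD, SUB, SHL, SAR, SHR, INC, DEC };  // SHL..SHR contiguous

static bool IsShiftOp(Value op) {
  return (SHL <= op) && (op <= SHR);  // true exactly for SHL, SAR, SHR
}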
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 82960270b2..e172cb8668 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -69,7 +69,6 @@ void ThreadLocalTop::Initialize() {
#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = 0;
#endif
- stack_is_cooked_ = false;
try_catch_handler_address_ = NULL;
context_ = NULL;
int id = ThreadManager::CurrentId();
@@ -303,39 +302,6 @@ void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
}
-void Top::MarkCompactPrologue(bool is_compacting) {
- MarkCompactPrologue(is_compacting, &thread_local_);
-}
-
-
-void Top::MarkCompactPrologue(bool is_compacting, char* data) {
- MarkCompactPrologue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
-}
-
-
-void Top::MarkCompactPrologue(bool is_compacting, ThreadLocalTop* thread) {
- if (is_compacting) {
- StackFrame::CookFramesForThread(thread);
- }
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting, char* data) {
- MarkCompactEpilogue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting) {
- MarkCompactEpilogue(is_compacting, &thread_local_);
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting, ThreadLocalTop* thread) {
- if (is_compacting) {
- StackFrame::UncookFramesForThread(thread);
- }
-}
-
static int stack_trace_nesting_level = 0;
static StringStream* incomplete_message = NULL;
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index 87333931eb..776c43e346 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -104,9 +104,6 @@ class ThreadLocalTop BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
#endif
- bool stack_is_cooked_;
- inline bool stack_is_cooked() { return stack_is_cooked_; }
- inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
// Generated code scratch locations.
int32_t formal_count_;
@@ -260,12 +257,6 @@ class Top {
// Generated code scratch locations.
static void* formal_count_address() { return &thread_local_.formal_count_; }
- static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
- static void MarkCompactPrologue(bool is_compacting,
- char* archived_thread_data);
- static void MarkCompactEpilogue(bool is_compacting,
- char* archived_thread_data);
static void PrintCurrentStackTrace(FILE* out);
static void PrintStackTrace(FILE* out, char* thread_data);
static void PrintStack(StringStream* accumulator);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 2d0a1bfbdc..d605891e6d 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -326,9 +326,9 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(from < length_);
ASSERT(to <= length_);
ASSERT(from < to);
+ ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
@@ -476,6 +476,213 @@ inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
}
+/*
+ * A class that collects values into a backing store.
+ * Specialized versions of the class can allow access to the backing store
+ * in different ways.
+ * There is no guarantee that the backing store is contiguous (and, as a
+ * consequence, no guarantees that consecutively added elements are adjacent
+ * in memory). The collector may move elements unless it has guaranteed not
+ * to.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class Collector {
+ public:
+ explicit Collector(int initial_capacity = kMinCapacity)
+ : index_(0), size_(0) {
+ if (initial_capacity < kMinCapacity) {
+ initial_capacity = kMinCapacity;
+ }
+ current_chunk_ = Vector<T>::New(initial_capacity);
+ }
+
+ virtual ~Collector() {
+ // Free backing store (in reverse allocation order).
+ current_chunk_.Dispose();
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ }
+
+ // Add a single element.
+ inline void Add(T value) {
+ if (index_ >= current_chunk_.length()) {
+ Grow(1);
+ }
+ current_chunk_[index_] = value;
+ index_++;
+ size_++;
+ }
+
+ // Add a block of contiguous elements and return a Vector backed by the
+ // memory area.
+ // A basic Collector will keep this vector valid as long as the Collector
+ // is alive.
+ inline Vector<T> AddBlock(int size, T initial_value) {
+ ASSERT(size > 0);
+ if (size > current_chunk_.length() - index_) {
+ Grow(size);
+ }
+ T* position = current_chunk_.start() + index_;
+ index_ += size;
+ size_ += size;
+ for (int i = 0; i < size; i++) {
+ position[i] = initial_value;
+ }
+ return Vector<T>(position, size);
+ }
+
+
+ // Write the contents of the collector into the provided vector.
+ void WriteTo(Vector<T> destination) {
+ ASSERT(size_ <= destination.length());
+ int position = 0;
+ for (int i = 0; i < chunks_.length(); i++) {
+ Vector<T> chunk = chunks_.at(i);
+ for (int j = 0; j < chunk.length(); j++) {
+ destination[position] = chunk[j];
+ position++;
+ }
+ }
+ for (int i = 0; i < index_; i++) {
+ destination[position] = current_chunk_[i];
+ position++;
+ }
+ }
+
+ // Allocate a single contiguous vector, copy all the collected
+ // elements to the vector, and return it.
+ // The caller is responsible for freeing the memory of the returned
+ // vector (e.g., using Vector::Dispose).
+ Vector<T> ToVector() {
+ Vector<T> new_store = Vector<T>::New(size_);
+ WriteTo(new_store);
+ return new_store;
+ }
+
+ // Resets the collector to be empty.
+ virtual void Reset() {
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ chunks_.Rewind(0);
+ index_ = 0;
+ size_ = 0;
+ }
+
+ // Total number of elements added to collector so far.
+ inline int size() { return size_; }
+
+ protected:
+ static const int kMinCapacity = 16;
+ List<Vector<T> > chunks_;
+ Vector<T> current_chunk_; // Block of memory currently being written into.
+ int index_; // Current index in current chunk.
+ int size_; // Total number of elements in collector.
+
+ // Creates a new current chunk, and stores the old chunk in the chunks_ list.
+ void Grow(int min_capacity) {
+ ASSERT(growth_factor > 1);
+ int growth = current_chunk_.length() * (growth_factor - 1);
+ if (growth > max_growth) {
+ growth = max_growth;
+ }
+ int new_capacity = current_chunk_.length() + growth;
+ if (new_capacity < min_capacity) {
+ new_capacity = min_capacity + growth;
+ }
+ Vector<T> new_chunk = Vector<T>::New(new_capacity);
+ int new_index = PrepareGrow(new_chunk);
+ if (index_ > 0) {
+ chunks_.Add(current_chunk_.SubVector(0, index_));
+ } else {
+ // Can happen if the call to PrepareGrow moves everything into
+ // the new chunk.
+ current_chunk_.Dispose();
+ }
+ current_chunk_ = new_chunk;
+ index_ = new_index;
+ ASSERT(index_ + min_capacity <= current_chunk_.length());
+ }
+
+ // Before replacing the current chunk, give a subclass the option to move
+ // some of the current data into the new chunk. The function may update
+ // the current index_ value to represent data no longer in the current chunk.
+ // Returns the initial index of the new chunk (after copied data).
+ virtual int PrepareGrow(Vector<T> new_chunk) {
+ return 0;
+ }
+};
+
+
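
A quick usage sketch for the Collector above, relying only on the members declared in this hunk and Vector semantics as defined in this file:

void CollectorExample() {
  Collector<int> collector;
  for (int i = 0; i < 1000; i++) collector.Add(i);  // grows chunk by chunk
  Vector<int> block = collector.AddBlock(8, 0);     // 8 contiguous zeros
  block[3] = 42;                  // valid as long as the collector lives
  Vector<int> all = collector.ToVector();           // flat copy of all 1008
  // ... use 'all' ...
  all.Dispose();                  // caller owns the flattened copy
}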
+/*
+ * A collector that allows sequences of values to be guaranteed to
+ * stay consecutive.
+ * If the backing store grows while a sequence is active, the current
+ * sequence might be moved, but after the sequence is ended, it will
+ * not move again.
+ * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
+ * as well, if inside an active sequence where another element is added.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class SequenceCollector : public Collector<T, growth_factor, max_growth> {
+ public:
+ explicit SequenceCollector(int initial_capacity)
+ : Collector<T, growth_factor, max_growth>(initial_capacity),
+ sequence_start_(kNoSequence) { }
+
+ virtual ~SequenceCollector() {}
+
+ void StartSequence() {
+ ASSERT(sequence_start_ == kNoSequence);
+ sequence_start_ = this->index_;
+ }
+
+ Vector<T> EndSequence() {
+ ASSERT(sequence_start_ != kNoSequence);
+ int sequence_start = sequence_start_;
+ sequence_start_ = kNoSequence;
+ if (sequence_start == this->index_) return Vector<T>();
+ return this->current_chunk_.SubVector(sequence_start, this->index_);
+ }
+
+ // Drops the currently added sequence, and all collected elements in it.
+ void DropSequence() {
+ ASSERT(sequence_start_ != kNoSequence);
+ int sequence_length = this->index_ - sequence_start_;
+ this->index_ = sequence_start_;
+ this->size_ -= sequence_length;
+ sequence_start_ = kNoSequence;
+ }
+
+ virtual void Reset() {
+ sequence_start_ = kNoSequence;
+ this->Collector<T, growth_factor, max_growth>::Reset();
+ }
+
+ private:
+ static const int kNoSequence = -1;
+ int sequence_start_;
+
+ // Move the currently active sequence to the new chunk.
+ virtual int PrepareGrow(Vector<T> new_chunk) {
+ if (sequence_start_ != kNoSequence) {
+ int sequence_length = this->index_ - sequence_start_;
+ // The new chunk is always larger than the current chunk, so there
+ // is room for the copy.
+ ASSERT(sequence_length < new_chunk.length());
+ for (int i = 0; i < sequence_length; i++) {
+ new_chunk[i] = this->current_chunk_[sequence_start_ + i];
+ }
+ this->index_ = sequence_start_;
+ sequence_start_ = 0;
+ return sequence_length;
+ }
+ return 0;
+ }
+};
+
+
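
And the corresponding sketch for SequenceCollector: elements added between StartSequence and EndSequence are guaranteed to end up adjacent, even if the backing store grows mid-sequence:

void SequenceCollectorExample() {
  SequenceCollector<char> collector(64);
  collector.StartSequence();
  for (const char* p = "hello"; *p != '\0'; p++) collector.Add(*p);
  Vector<char> word = collector.EndSequence();  // contiguous "hello"
  // 'word' aliases the backing store and stays put until Reset() or
  // destruction; DropSequence() would have discarded it instead.
  (void)word;
}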
// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exits tells whether the file existed.
@@ -740,8 +947,8 @@ inline Dest BitCast(const Source& source) {
}
template <class Dest, class Source>
-inline Dest BitCast(Source* & source) {
- return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+inline Dest BitCast(Source* source) {
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
} } // namespace v8::internal
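
The overload fix above takes the pointer by value instead of by reference-to-pointer; the underlying BitCast is the usual memcpy type-pun. A standalone rendering of the idiom (modern spelling with static_assert; V8's version uses its own compile-time assert):

#include <cstdint>
#include <cstring>

template <class Dest, class Source>
inline Dest BitCastSketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));  // well-defined type pun
  return dest;
}

uint32_t FloatBits(float f) {
  return BitCastSketch<uint32_t>(f);  // e.g. FloatBits(1.0f) == 0x3F800000
}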
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 64ea9fccf6..8c948cc7b3 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -67,6 +67,7 @@ namespace internal {
SC(pcre_mallocs, V8.PcreMallocCount) \
/* OS Memory allocated */ \
SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(normalized_maps, V8.NormalizedMaps) \
SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
SC(alive_after_last_gc, V8.AliveAfterLastGC) \
@@ -84,6 +85,11 @@ namespace internal {
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
SC(regexp_cache_hits, V8.RegExpCacheHits) \
SC(regexp_cache_misses, V8.RegExpCacheMisses) \
+ SC(string_ctor_calls, V8.StringConstructorCalls) \
+ SC(string_ctor_conversions, V8.StringConstructorConversions) \
+ SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
+ SC(string_ctor_string_value, V8.StringConstructorStringValue) \
+ SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
/* Amount of evaled source code. */ \
SC(total_eval_size, V8.TotalEvalSize) \
/* Amount of loaded source code. */ \
@@ -92,6 +98,8 @@ namespace internal {
SC(total_parse_size, V8.TotalParseSize) \
/* Amount of source code skipped over using preparsing. */ \
SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Number of symbol lookups skipped using preparsing */ \
+ SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
/* Amount of compiled source code. */ \
SC(total_compile_size, V8.TotalCompileSize) \
/* Amount of source code compiled with the old codegen. */ \
@@ -101,7 +109,10 @@ namespace internal {
/* Number of contexts created from scratch. */ \
SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
/* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)
+ SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
+ /* Number of code objects found from pc. */ \
+ SC(pc_to_code, V8.PcToCode) \
+ SC(pc_to_code_cached, V8.PcToCodeCached)
#define STATS_COUNTER_LIST_2(SC) \
@@ -120,6 +131,8 @@ namespace internal {
V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
+ SC(map_slow_to_fast_elements, V8.MapSlowToFastElements) \
+ SC(map_fast_to_slow_elements, V8.MapFastToSlowElements) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
@@ -155,6 +168,9 @@ namespace internal {
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
SC(store_normal_miss, V8.StoreNormalMiss) \
SC(store_normal_hit, V8.StoreNormalHit) \
+ SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
+ SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
+ SC(cow_arrays_converted, V8.COWArraysConverted) \
SC(call_miss, V8.CallMiss) \
SC(keyed_call_miss, V8.KeyedCallMiss) \
SC(load_miss, V8.LoadMiss) \
@@ -189,6 +205,7 @@ namespace internal {
SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
SC(sub_string_runtime, V8.SubStringRuntime) \
SC(sub_string_native, V8.SubStringNative) \
+ SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
SC(string_compare_native, V8.StringCompareNative) \
SC(string_compare_runtime, V8.StringCompareRuntime) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
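
The SC(name, caption) entries above form an X-macro list: each consumer defines SC, expands the list, and undefines it again. A reduced model of the pattern, using two of the counters added in this hunk (captions shown as strings for illustration):

#include <cstdio>

#define MY_COUNTER_LIST(SC)                  \
  SC(pc_to_code, "V8.PcToCode")              \
  SC(pc_to_code_cached, "V8.PcToCodeCached")

// First expansion: declare one int per counter.
#define SC(name, caption) static int name##_count = 0;
MY_COUNTER_LIST(SC)
#undef SC

// Second expansion: print them all.
void DumpCounters() {
#define SC(name, caption) std::printf("%s = %d\n", caption, name##_count);
  MY_COUNTER_LIST(SC)
#undef SC
}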
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index f761d3812f..9dbdf4c28a 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -60,9 +60,6 @@
#include "flags.h"
// Objects & heap
-#include "objects.h"
-#include "spaces.h"
-#include "heap.h"
#include "objects-inl.h"
#include "spaces-inl.h"
#include "heap-inl.h"
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 85540e85df..ca1c99d4c1 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -111,13 +111,20 @@ function GlobalParseInt(string, radix) {
if (!(radix == 0 || (2 <= radix && radix <= 36)))
return $NaN;
}
- return %StringParseInt(ToString(string), radix);
+ string = TO_STRING_INLINE(string);
+ if (%_HasCachedArrayIndex(string) &&
+ (radix == 0 || radix == 10)) {
+ return %_GetCachedArrayIndex(string);
+ }
+ return %StringParseInt(string, radix);
}
// ECMA-262 - 15.1.2.3
function GlobalParseFloat(string) {
- return %StringParseFloat(ToString(string));
+ string = TO_STRING_INLINE(string);
+ if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
+ return %StringParseFloat(string);
}
@@ -743,8 +750,8 @@ function ObjectSeal(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (desc.isConfigurable()) desc.setConfigurable(false);
DefineOwnProperty(obj, name, desc, true);
@@ -759,8 +766,8 @@ function ObjectFreeze(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (IsDataDescriptor(desc)) desc.setWritable(false);
if (desc.isConfigurable()) desc.setConfigurable(false);
@@ -786,8 +793,8 @@ function ObjectIsSealed(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (desc.isConfigurable()) return false;
}
@@ -804,8 +811,8 @@ function ObjectIsFrozen(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (IsDataDescriptor(desc) && desc.isWritable()) return false;
if (desc.isConfigurable()) return false;
@@ -836,6 +843,7 @@ function ObjectIsExtensible(obj) {
}
});
+%SetExpectedNumberOfProperties($Object, 4);
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 1e5e82e644..b6e656d25a 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -342,28 +342,6 @@ void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
}
-void ThreadManager::MarkCompactPrologue(bool is_compacting) {
- for (ThreadState* state = ThreadState::FirstInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::MarkCompactPrologue(is_compacting, data);
- }
-}
-
-
-void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
- for (ThreadState* state = ThreadState::FirstInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::MarkCompactEpilogue(is_compacting, data);
- }
-}
-
-
int ThreadManager::CurrentId() {
return Thread::GetThreadLocalInt(thread_id_key);
}
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index ca42354c43..da56d0525c 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -105,8 +105,6 @@ class ThreadManager : public AllStatic {
static void Iterate(ObjectVisitor* v);
static void IterateArchivedThreads(ThreadVisitor* v);
- static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
static int CurrentId();
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 61c0a0e65d..0af17958d8 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 3
-#define BUILD_NUMBER 8
+#define MINOR_VERSION 4
+#define BUILD_NUMBER 2
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 4f2d2b9616..85ad63719a 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -310,8 +310,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movsxlq(rbx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(not_equal,
Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
@@ -876,6 +875,13 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // TODO(849): implement custom construct stub.
+ // Generate a copy of the generic stub for now.
+ Generate_JSConstructStubGeneric(masm);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -898,10 +904,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rdi: called object
// rax: number of arguments
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc+1.
- __ movq(Operand(rsp, rax, times_pointer_size, kPointerSize), rdi);
// Set expected number of arguments to zero (not changing rax).
__ movq(rbx, Immediate(0));
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
new file mode 100644
index 0000000000..c75b9455b2
--- /dev/null
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -0,0 +1,4015 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in rsi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function info from the stack.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+ __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+ __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
+ __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
+ __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(rcx); // Temporarily remove return address.
+ __ pop(rdx);
+ __ push(rsi);
+ __ push(rdx);
+ __ push(rcx); // Restore return address.
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
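
For orientation, the object built field-by-field above has roughly this shape; the struct below is an illustrative sketch, with stand-in names for what JSFunction's real offsets point at, not actual V8 declarations:

    struct JSFunctionSketch {
      void* map;                       // function map from the global context
      void* properties;                // empty_fixed_array
      void* elements;                  // empty_fixed_array
      void* prototype_or_initial_map;  // the hole value until first use
      void* shared_function_info;      // taken from the stack argument
      void* context;                   // the current context (rsi)
      void* literals;                  // empty_fixed_array
      void* code_entry;                // shared info's code, past Code::kHeaderSize
    };
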
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Set up the object header.
+ __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+ // Set up the fixed slots.
+ __ xor_(rbx, rbx); // Set to NULL.
+ __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+
+ // Copy the global object from the surrounding context.
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: constant elements.
+ // [rsp + (2 * kPointerSize)]: literal index.
+ // [rsp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into rcx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(rcx);
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ expected_map_index);
+ __ Assert(equal, message);
+ __ pop(rcx);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
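
The copy loops above amount to word-level cloning with the elements pointer redirected into the same allocation. A minimal C++ sketch of that shape (illustrative only; both objects are treated as raw word arrays and elements_slot stands in for JSArray::kElementsOffset):

    #include <cstdint>

    // dst points at a single allocation big enough for header plus elements,
    // just as AllocateInNewSpace provides above.
    void CloneShallowSketch(uintptr_t* dst, const uintptr_t* boilerplate,
                            int header_words, int elements_words,
                            int elements_slot) {
      for (int i = 0; i < header_words; i++) dst[i] = boilerplate[i];
      if (elements_words > 0) {
        const uintptr_t* src_elements =
            reinterpret_cast<const uintptr_t*>(boilerplate[elements_slot]);
        uintptr_t* dst_elements = dst + header_words;  // right after the header
        dst[elements_slot] = reinterpret_cast<uintptr_t>(dst_elements);
        for (int i = 0; i < elements_words; i++) {
          dst_elements[i] = src_elements[i];
        }
      }
    }
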
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ // We don't use CmpObjectType because we manipulate the type field.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ __ j(above_equal, &not_string);
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rdx);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result);
+ // HeapNumber => false iff +0, -0, or NaN.
+ // These three cases set the zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in rax.
+ __ bind(&true_result);
+ __ movq(rax, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ xor_(rax, rax);
+ __ ret(1 * kPointerSize);
+}
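
Restated as plain C++, the decision ladder the stub implements is the ECMAScript ToBoolean conversion; this sketch mirrors the branches above rather than any real V8 API:

    #include <cmath>
    #include <string>

    bool ToBooleanSketch(bool is_null, bool is_undetectable, bool is_js_object,
                         const std::string* str, const double* heap_number) {
      if (is_null) return false;                 // 'null' => false
      if (is_undetectable) return false;         // undetectable => false
      if (is_js_object) return true;             // JavaScript object => true
      if (str != nullptr) return !str->empty();  // string => false iff empty
      if (heap_number != nullptr) {
        double v = *heap_number;
        return !(v == 0.0 || std::isnan(v));     // false iff +0, -0 or NaN
      }
      return true;
    }
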
+
+
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ movq(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // Order of moves is important to avoid destroying the left argument.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // Order of moves is important to avoid destroying the right argument.
+ __ movq(right_arg, right);
+ __ movq(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ Push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (left.is(left_arg)) {
+ __ Move(right_arg, right);
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ Move(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ movq(right_arg, right);
+ __ Move(left_arg, left);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
+};
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
+
+ Label not_smis;
+ // 2. Smi check both operands.
+ if (static_operands_type_.IsSmi()) {
+ // Skip smi check if we know that both arguments are smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ // Handle OR here, since we do extra smi-checking in the or code below.
+ __ SmiOr(right, right, left);
+ GenerateReturn(masm);
+ return;
+ }
+ } else {
+ if (op_ != Token::BIT_OR) {
+ // Skip the check for OR as it is better combined with the
+ // actual operation.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ }
+
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::ADD: {
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+ }
+
+ case Token::SUB: {
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+ }
+
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
+ case Token::DIV:
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::MOD:
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
+ break;
+
+ case Token::BIT_OR:
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ switch (op_) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, slow);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ ASSERT(use_fp_on_smis.is_linked());
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ }
+ default:
+ break;
+ }
+
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
+
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
+
+ default:
+ break;
+ }
+}
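
The smi fast path relies on the x64 tagging scheme: with kSmiValueSize == 32 the payload lives in the upper half of the word and the low 32 bits are zero, so a tagged add is a single 64-bit add whose signed overflow coincides exactly with the payload leaving the 32-bit range. A sketch (assuming the GCC/Clang __builtin_add_overflow intrinsic; these are not the real macro-assembler helpers):

    #include <cstdint>
    #include <optional>

    inline int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }
    inline int32_t SmiUntag(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

    std::optional<int64_t> SmiAddSketch(int64_t left, int64_t right) {
      int64_t result;
      // 64-bit signed overflow here means the untagged sum left the smi
      // range, which is when the stub bails out to use_fp_on_smis.
      if (__builtin_add_overflow(left, right, &result)) return std::nullopt;
      return result;  // still a valid smi: the low 32 bits remain zero
    }
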
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ // rax: y
+ // rdx: x
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ __ movq(rax, rdx);
+ break;
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // A perfect moment to try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
+ } else {
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
+ }
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ GenerateReturn(masm);
+
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
+ // Allocate a heap number if needed.
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ }
+
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result. If the arguments were passed in registers, place them back on
+ // the stack in the correct order below the return address.
+ __ bind(&call_runtime);
+
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+
+ if (HasArgsReversed()) {
+ lhs = rax;
+ rhs = rdx;
+ } else {
+ lhs = rdx;
+ rhs = rax;
+ }
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in rdx and rax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ Condition is_smi;
+ is_smi = masm->CheckSmi(lhs);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &not_string1);
+
+ // First argument is a string, test second.
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to look up the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ ASSERT(!HasArgsInRegisters());
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If the arguments were not passed in registers, remove them from the
+ // stack before returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(rcx);
+ if (HasArgsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ // Left and right arguments are already on stack.
+ __ pop(rcx); // Save the return address.
+
+ // Push this stub's key.
+ __ Push(Smi::FromInt(MinorKey()));
+
+ // Although the operation and the type info are encoded into the key,
+ // the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(op_));
+
+ __ Push(Smi::FromInt(runtime_operands_type_));
+
+ __ push(rcx); // The return address.
+
+ // Perform patching to an appropriate fast case and return the result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+ __ bind(&loaded);
+ // ST[0] == double value
+ // rbx = bits of double value.
+ // rdx = also bits of double value.
+ // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
+ // h = h0 = bits ^ (bits >> 32);
+ // h ^= h >> 16;
+ // h ^= h >> 8;
+ // h = h & (cacheSize - 1);
+ // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
+ __ sar(rdx, Immediate(32));
+ __ xorl(rdx, rbx);
+ __ movl(rcx, rdx);
+ __ movl(rax, rdx);
+ __ movl(rdi, rdx);
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
+ __ xorl(rcx, rdx);
+ __ xorl(rax, rdi);
+ __ xorl(rcx, rax);
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // rbx = bits of double value.
+ // rcx = TranscendentalCache::hash(double value).
+ __ movq(rax, ExternalReference::transcendental_cache_array_address());
+ // rax points to cache array.
+ __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // rax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ testq(rax, rax);
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ // Two uint_32's and a pointer per element.
+ CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+ CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+ CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ }
+#endif
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
+ __ addl(rcx, rcx);
+ __ lea(rcx, Operand(rax, rcx, times_8, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmpq(rbx, Operand(rcx, 0));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ movq(rax, Operand(rcx, 2 * kIntSize));
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ Label nan_result;
+ GenerateOperation(masm, &nan_result);
+ __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+
+ __ bind(&nan_result);
+ __ fstp(0); // Remove argument from FPU stack.
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ ret(kPointerSize);
+}
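
The hash commented above ("h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)") is straightforward in C++; a sketch, with cache_size assumed to be the power of two that the ASSERT enforces:

    #include <cstdint>

    uint32_t TranscendentalHashSketch(uint64_t double_bits, uint32_t cache_size) {
      uint32_t h = static_cast<uint32_t>(double_bits) ^
                   static_cast<uint32_t>(double_bits >> 32);
      h ^= h >> 16;
      h ^= h >> 8;
      return h & (cache_size - 1);  // cache_size must be a power of two
    }
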
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+ Label* on_nan_result) {
+ // Registers:
+ // rbx: Bits of input double. Must be preserved.
+ // rcx: Pointer to cache entry. Must be preserved.
+ // st(0): Input double
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ movq(rdi, rbx);
+ // Move exponent and sign bits to low bits.
+ __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ // Remove sign bit.
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+ int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+ __ cmpl(rdi, Immediate(supported_exponent_limit));
+ __ j(below, &in_range);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmpl(rdi, Immediate(0x7ff));
+ __ j(equal, on_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ // FPU Stack: input % 2*pi, 2*pi,
+ __ fstp(0);
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
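
In portable terms the fprem1 loop above is an IEEE remainder by 2*pi, applied only once the argument leaves the range fsin/fcos accept; a sketch of the same control flow (not bit-exact x87 behaviour):

    #include <cmath>

    double SinWithReductionSketch(double x) {
      const double kTwoPi = 6.283185307179586;
      const double kTwoTo63 = 9223372036854775808.0;  // 2^63, exactly representable
      if (!std::isfinite(x)) return std::nan("");     // infinity and NaN => NaN
      if (std::fabs(x) >= kTwoTo63) {
        x = std::remainder(x, kTwoPi);                // what fprem1 iterates toward
      }
      return std::sin(x);
    }
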
+
+
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+ // Double the value to remove the sign bit, shift the exponent down to the
+ // least significant bits, and subtract the bias to get the unshifted,
+ // unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done);
+
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If the exponent is negative or above 83, the number contains no
+ // significant bits in the range 0..2^31, so the result is zero, and the
+ // result register already holds zero.
+ __ j(above, &done);
+
+ // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+ // If the value is negative this computes (value - 1) ^ -1, i.e. the
+ // two's-complement negation of the low bits; otherwise (value - 0) ^ 0.
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
+ } else {
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
+ }
+
+ __ bind(&done);
+}
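
What IntegerConvert computes is the ECMA-262 ToInt32-style truncation: drop the fraction, keep the low 32 bits of the integer part. A portable sketch of those semantics (the routine above gets there by bit manipulation instead):

    #include <cmath>
    #include <cstdint>

    int32_t IntegerConvertSketch(double value) {
      const double kTwoTo32 = 4294967296.0;
      if (!std::isfinite(value)) return 0;   // NaN and infinities => 0
      double modulo = std::fmod(std::trunc(value), kTwoTo32);
      if (modulo < 0) modulo += kTwoTo32;    // into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(modulo));
    }
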
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ // Check float operands.
+ Label done;
+ Label rax_is_smi;
+ Label rax_is_object;
+ Label rdx_is_object;
+
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ JumpIfSmi(rax, &rax_is_smi);
+
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
+
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ bind(&rax_is_smi);
+ __ SmiToInteger32(rcx, rax);
+
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ Label* conversion_failure,
+ Register heap_number_map) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the edx heap number in rcx.
+ IntegerConvert(masm, rdx, rdx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rcx, rax);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+}
+
+
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
+
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+
+ if (negative_zero_ == kIgnoreNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(equal, &done);
+ }
+
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
+
+ // Either zero or Smi::kMinValue, neither of which becomes a smi when
+ // negated.
+ if (negative_zero_ == kStrictNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+ } else {
+ __ jmp(&slow);
+ }
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, rax);
+
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ }
+
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
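
Heap-number negation above is a single bit flip on the IEEE-754 representation; sketched in C++:

    #include <cstdint>
    #include <cstring>

    double NegateByBitFlipSketch(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      bits ^= uint64_t{1} << 63;   // flip the sign bit, as the stub does
      std::memcpy(&value, &bits, sizeof bits);
      return value;                // also correct for 0, NaN and infinities
    }
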
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in rdx and the parameter count is in rax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(rdx, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register rax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpq(rdx, rax);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpq(rdx, rcx);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(rbx); // Return address.
+ __ push(rdx);
+ __ push(rbx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
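
The address arithmetic above reduces to one formula: parameters sit above the frame pointer, with argument 0 farthest away. An illustrative sketch (kDisplacement as defined at the top of the function; the stub's bounds check guarantees key < argc):

    #include <cstdint>

    uintptr_t ArgumentSlotSketch(uintptr_t fp, int argc, int key) {
      const int kPointerSize = 8;                  // x64
      const int kDisplacement = 1 * kPointerSize;  // skips the frame pointer
      return fp + static_cast<uintptr_t>(argc - key) * kPointerSize +
             kDisplacement;
    }
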
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Space on stack must already hold a smi.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
+ // Do not clobber the length index for the indexing operation since
+ // it is used to compute the size for allocation later.
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ testl(rcx, rcx);
+ __ j(zero, &add_arguments_object);
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rdi, offset));
+
+ // Copy the JS object part.
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
+
+ // Set up the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ SmiTest(rcx);
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack and untag the length.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ addq(rdi, Immediate(kPointerSize));
+ __ subq(rdx, Immediate(kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
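
The try_allocate size computation above, restated; kArgumentsObjectSize and the fixed-array header size here are assumed stand-ins for the real Heap and FixedArray constants:

    int ArgumentsAllocationSizeSketch(int argc) {
      const int kPointerSize = 8;
      const int kFixedArrayHeaderSize = 2 * kPointerSize;
      const int kArgumentsObjectSize = 4 * kPointerSize;  // assumed value
      int size = kArgumentsObjectSize;
      if (argc > 0) {  // the zero-argument case allocates no elements array
        size += kFixedArrayHeaderSize + argc * kPointerSize;
      }
      return size;  // one new-space allocation covers object plus elements
    }
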
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to the runtime if native RegExp is not selected at
+ // compile time, or if the regexp entry in generated code has been turned
+ // off by the runtime switch.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ testq(kScratchRegister, kScratchRegister);
+ __ j(zero, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ Condition is_smi = masm->CheckSmi(rcx);
+ __ Check(NegateCondition(is_smi),
+ "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // rcx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
+ __ j(not_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector
+ // buffer.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rdx, rdx, times_1, 2));
+ // Check that the static offsets vector buffer is large enough.
+ __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the second argument is a string.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rax, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: Subject string.
+ // rcx: RegExp data (FixedArray).
+ // rdx: Number of capture registers.
+ // Check that the third argument is a positive smi less than the string
+ // length. A negative value will be greater (unsigned comparison).
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ andb(rbx, Immediate(
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+  STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ Cmp(rdx, Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // rax: first part of cons string.
+ // rbx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask | kStringEncodingMask));
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
+
+ __ bind(&seq_ascii_string);
+ // rax: subject string (sequential ascii)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rdi, 1); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // rax: subject string (flat two-byte)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rdi, 0); // Type is two byte.
+
+ __ bind(&check_code);
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+
+ // rax: subject string
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+
+ // rax: subject string
+ // rbx: previous index
+  // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+  // rsi is caller-save on Windows and is used to pass a parameter on Linux.
+ __ push(rsi);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments);
+ int argument_slots_on_stack =
+ masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
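+  // Of the seven arguments, the first six are passed in registers on Linux,
+  // so only argument 7 needs a stack slot there; on Windows only four go in
+  // registers, so arguments 5-7 plus the four-slot register shadow area all
+  // live on the stack, as the stores below show.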
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movq(r9, Operand(kScratchRegister, 0));
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ addq(r9, Operand(kScratchRegister, 0));
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+#endif
+
+ // Argument 5: static offsets vector buffer.
+ __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ // Argument 5 passed in r8 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+#endif
+
+ // First four arguments are passed in registers on both Linux and Windows.
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+  // Keep track of aliasing between the argX registers defined above and the
+  // registers used below.
+  // rax: subject string
+  // rbx: previous index
+  // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
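+  // For example, for a sequential ascii subject of length 10 with previous
+  // index 3, arg3 points at the character with index 3 and arg4 one past
+  // the last character.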
+ Label setup_two_byte, setup_rest;
+ __ testb(rdi, rdi);
+ __ j(zero, &setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ jmp(&setup_rest);
+ __ bind(&setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+
+ __ bind(&setup_rest);
+ // Argument 2: Previous index.
+ __ movq(arg2, rbx);
+
+ // Argument 1: Subject string.
+ __ movq(arg1, rax);
+
+ // Locate the code entry and call it.
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
+
+  // rsi is caller-save, as it is used to pass a parameter.
+ __ pop(rsi);
+
+ // Check the result.
+ Label success;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ j(equal, &success);
+ Label failure;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ j(equal, &failure);
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+  // If the result is not EXCEPTION, it can only be RETRY. Handle that in the
+  // runtime system.
+  __ j(not_equal, &runtime);
+  // The result must now be EXCEPTION. If there is no pending exception
+  // already, a stack overflow (on the backtrack stack) was detected in RegExp
+  // code, but the exception object has not been created yet. Handle that in
+  // the runtime system.
+  // TODO(592): Rerun the RegExp to get the stack overflow exception.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ movq(kScratchRegister, pending_exception_address);
+  __ Cmp(Operand(kScratchRegister, 0), Factory::the_hole_value());
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ Move(rax, Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ SmiToInteger32(rax,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rax, rax, times_1, 2));
+
+ // rdx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rdx: number of capture registers
+ // Store the capture count.
+ __ Integer32ToSmi(kScratchRegister, rdx);
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ kScratchRegister);
+ // Store last subject and last input.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rcx: offsets vector
+ // rdx: number of capture registers
+ Label next_capture, done;
+  // The capture register counter starts from the number of capture registers
+  // and counts down until wrapping after zero.
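+  // For example, with 6 capture registers the loop stores indices 5, 4, ...,
+  // 0 and exits when the counter goes negative.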
+ __ bind(&next_capture);
+ __ subq(rdx, Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer and make it a smi.
+ __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
+ __ Integer32ToSmi(rdi, rdi, &runtime);
+ // Store the smi value in the last match info.
+ __ movq(FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ rdi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shrl(mask, Immediate(1));
+ __ subq(mask, Immediate(1)); // Make mask.
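+  // For example, a cache FixedArray of length 256 holds 128 entries, giving
+  // a mask of 127 (0x7F).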
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
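+    // For example, the double 1.5 has bit pattern 0x3FF8000000000000, so its
+    // hash is 0x3FF80000 ^ 0x00000000 = 0x3FF80000 before masking.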
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ Register probe = mask;
+ __ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
+ __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&is_smi);
+ __ SmiToInteger32(scratch, object);
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask) {
+ __ and_(hash, mask);
+  // Each entry in the string cache consists of two pointer-sized fields, but
+  // the times_twice_pointer_size scale factor (multiplication by 16) is not
+  // supported by the addressing modes on the x64 platform, so we have to
+  // premultiply the entry index before the lookup.
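+  // With 8-byte pointers the shift amount is 4, i.e. entry i starts at byte
+  // offset i * 16 from the first element.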
+ __ shl(hash, Immediate(kPointerSizeLog2 + 1));
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ movq(rbx, Operand(rsp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
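+  // For example, for cc == greater this returns LESS, so the caller's
+  // subsequent "greater" check on the stub result fails and comparisons
+  // against undefined correctly evaluate to false.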
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+  // The compare stub returns a positive, negative, or zero 64-bit integer
+  // value in rax, corresponding to the result of comparing the two inputs.
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Two identical objects are equal unless they are both NaN or undefined.
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, &not_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check_for_nan);
+ __ Set(rax, NegativeComparisonResult(cc_));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(rax, EQUAL);
+ __ ret(0);
+ } else {
+ Label heap_number;
+      // If it's not a heap number, return equal for the (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
+ }
+
+ __ bind(&not_identical);
+ }
+
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
+
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
+ __ SelectNonSmi(rbx, rax, rdx, &not_smis);
+
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal. ebx (the lower half of rbx) is not zero.
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ }
+
+      // If either operand is a JSObject or an oddball value, then they are not
+      // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // If the first object is a JS object, we have done pointer comparison.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (eax (not rax) is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax (not rax) already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
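+    // For example, (smi ...0) + (heap object ...1) has its low bit set,
+    // while (heap object ...1) + (heap object ...1) carries out of the low
+    // bit and leaves it clear, so testing the low bit of the sum suffices.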
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0);
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+
+  // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ }
+
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ Push(Smi::FromInt(0));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+  // If the receiver might be a value (string, number or boolean), check for
+  // this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow);
+  // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // Check that the stack contains the next handler, frame pointer, state and
+  // return address, in that order.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+  // Get the next handler in the chain.
+ __ pop(rcx);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ pop(rbp); // pop frame pointer
+ __ pop(rdx); // remove state
+
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ xor_(rsi, rsi); // tentatively set context pointer to NULL
+ Label skip;
+ __ cmpq(rbp, Immediate(0));
+ __ j(equal, &skip);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+ __ ret(0);
+}
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label empty_result;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(kStackSpace, 0);
+ ASSERT_EQ(kArgc, 4);
+#ifdef _WIN64
+  // All the parameters should be set up by the caller.
+#else
+  // Set the first parameter register with the property name.
+  __ movq(rsi, rdx);
+  // The second parameter register, rdi, should have been set to a pointer to
+  // the AccessorInfo by the caller.
+#endif
+ // Call the api function!
+ __ movq(rax,
+ reinterpret_cast<int64_t>(fun()->address()),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(rax);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ movq(rsi, scheduled_exception_address);
+ __ Cmp(Operand(rsi, 0), Factory::the_hole_value());
+ __ j(not_equal, &promote_scheduled_exception);
+#ifdef _WIN64
+ // rax keeps a pointer to v8::Handle, unpack it.
+ __ movq(rax, Operand(rax, 0));
+#endif
+ // Check if the result handle holds 0.
+ __ testq(rax, rax);
+ __ j(zero, &empty_result);
+ // It was non-zero. Dereference to get the result value.
+ __ movq(rax, Operand(rax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame();
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ bind(&empty_result);
+ // It was zero; the result is undefined.
+ __ Move(rax, Factory::undefined_value());
+ __ jmp(&prologue);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // rax: result parameter for PerformGC, if any.
+ // rbx: pointer to C function (C callee-saved).
+ // rbp: frame pointer (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
+ // This pointer is reused in LeaveExitFrame(), so it is stored in a
+ // callee-saved register.
+
+  // Simple results are returned in rax (both AMD64 and Win64 calling
+  // conventions). Complex results must be written to the address passed as
+  // the first argument; in the AMD64 calling convention, a struct of two
+  // pointers is returned in rax and rdx.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ if (do_gc) {
+ // Pass failure code returned from last attempt as first argument to
+ // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+    // stack is known to be aligned. This function takes one argument, which
+    // is passed in a register.
+#ifdef _WIN64
+ __ movq(rcx, rax);
+#else // _WIN64
+ __ movq(rdi, rax);
+#endif
+ __ movq(kScratchRegister,
+ FUNCTION_ADDR(Runtime::PerformGC),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(kScratchRegister);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ incl(Operand(kScratchRegister, 0));
+ }
+
+ // Call C function.
+#ifdef _WIN64
+  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
+ // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
+ __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
+ if (result_size_ < 2) {
+ // Pass a pointer to the Arguments object as the first argument.
+ // Return result in single register (rax).
+ __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+ } else {
+ ASSERT_EQ(2, result_size_);
+ // Pass a pointer to the result location as the first argument.
+ __ lea(rcx, Operand(rsp, 6 * kPointerSize));
+ // Pass a pointer to the Arguments object as the second argument.
+ __ lea(rdx, Operand(rsp, 4 * kPointerSize));
+ }
+
+#else // _WIN64
+ // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+ __ movq(rdi, r14); // argc.
+ __ movq(rsi, r12); // argv.
+#endif
+ __ call(rbx);
+ // Result is in rax - do not destroy this register!
+
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ decl(Operand(kScratchRegister, 0));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+#ifdef _WIN64
+ // If return value is on the stack, pop it to registers.
+ if (result_size_ > 1) {
+ ASSERT_EQ(2, result_size_);
+ // Read result values stored on stack. Result is stored
+ // above the four argument mirror slots and the two
+ // Arguments object slots.
+ __ movq(rax, Operand(rsp, 6 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ }
+#endif
+ __ lea(rcx, Operand(rax, 1));
+ // Lower 2 bits of rcx are 0 iff rax has failure tag.
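+  // Failure objects are tagged with 0b11 in their two low bits, so adding 1
+  // clears both low bits exactly for failures (see the STATIC_ASSERT above).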
+ __ testl(rcx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame(result_size_);
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry);
+
+ // Special handling of out of memory exceptions.
+ __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ cmpq(rax, kScratchRegister);
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ movq(rax, Operand(kScratchRegister, 0));
+ __ movq(rdx, ExternalReference::the_hole_value_location());
+ __ movq(rdx, Operand(rdx, 0));
+ __ movq(Operand(kScratchRegister, 0), rdx);
+
+  // Special handling of termination exceptions, which are uncatchable by
+  // JavaScript code.
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+ __ j(equal, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ movq(rsp, Operand(rsp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ __ movq(kScratchRegister, handler_address);
+ __ pop(Operand(kScratchRegister, 0));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ movq(rax, Immediate(false));
+ __ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ store_rax(pending_exception);
+ }
+
+ // Clear the context pointer.
+ __ xor_(rsi, rsi);
+
+ // Restore registers from handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
+ StackHandlerConstants::kFPOffset);
+ __ pop(rbp); // FP
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ __ pop(rdx); // State
+
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+ __ ret(0);
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(result_size_);
+
+  // rax: Holds the context at this point, but should not be used.
+  //      On entry to code generated by GenerateCore, it must hold
+  //      a failure result if the do_gc argument to GenerateCore
+  //      is true. This failure result can be the result of code
+  //      generated by a previous call to GenerateCore. The value
+  //      of rax is then passed to Runtime::PerformGC.
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: argv pointer (C callee-saved).
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ movq(rax, failure, RelocInfo::NONE);
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+  // Set up the frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  // The scratch register is neither callee-saved nor an argument register on
+  // any platform, so it is free to use at this point.
+  // Cannot use the smi constant register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
+ __ push(rbx);
+  // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+  // callee-saved as well.
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(&not_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
+ }
+ __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ call(kScratchRegister);
+
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current rbp value is the same as the js_entry_sp value, it means
+  // that the current function is the outermost.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
+
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+#ifdef _WIN64
+  // Callee-saved in the Win64 ABI, argument/volatile in the AMD64 ABI.
+ __ pop(rsi);
+ __ pop(rdi);
+#endif
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+  // Returns a bitwise zero to indicate that the value
+  // is an instance of the function and anything else to
+  // indicate that the value is not an instance.
+
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ JumpIfSmi(rax, &slow);
+
+  // Check that the left hand side is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ // rdx is function, rax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(rbx, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Register mapping:
+ // rax is object map.
+ // rdx is function.
+ // rbx is function prototype.
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
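+  // For example, for "new F() instanceof F" the first iteration already
+  // matches F.prototype; walking all the way to null means the value is not
+  // an instance.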
+ Label loop, is_instance, is_not_instance;
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ // The code at is_not_instance assumes that kScratchRegister contains a
+ // non-zero GCable value (the null object in this case).
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs, the never-NaN-NaN condition is only taken into account if the
+  // condition is equal.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
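+  // For example, a strict equality stub with the number comparison omitted
+  // is named "CompareStub_EQ_STRICT_NO_NUMBER".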
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s",
+ cc_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ testb(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ movq(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ testb(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ testb(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxwl(result_, FieldOperand(object_,
+ scratch_, times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxbl(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ __ Integer32ToSmi(result_, result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(rax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ movq(scratch_, rax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code for getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
+ __ j(above, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
+ __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case_);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ Condition is_smi;
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &string_add_runtime);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &string_add_runtime);
+
+    // First argument is a string; test the second.
+ is_smi = masm->CheckSmi(rdx);
+ __ j(is_smi, &string_add_runtime);
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // rax: first string
+ // rdx: second string
+  // Check if either of the strings is empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
+ __ SmiTest(rcx);
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in rax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rbx);
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in rdx.
+ __ movq(rax, rdx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // rax: first string
+ // rbx: length of first string
+ // rcx: length of second string
+ // rdx: second string
+ // r8: map of first string if string check was performed above
+ // r9: map of second string if string check was performed above
+ Label string_add_flat_result, longer_than_two;
+ __ bind(&both_not_zero_length);
+
+  // If the arguments were known to be strings, their maps were not loaded
+  // into r8 and r9 by the code above.
+ if (!string_check_) {
+ __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ }
+ // Get the instance types of the two strings as they will be needed soon.
+ __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
+ __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+
+ // Look at the length of the result of adding the two strings.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+ __ SmiAdd(rbx, rbx, rcx, NULL);
+  // Use the runtime system when adding two one-character strings, as it
+  // contains optimizations for this specific case using the symbol table.
+ __ SmiCompare(rbx, Smi::FromInt(2));
+ __ j(not_equal, &longer_than_two);
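+  // For example, "a" + "b" probes the symbol table for the two-character
+  // symbol "ab" and only allocates a fresh string on a cache miss.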
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
+ &string_add_runtime);
+
+  // Get the two characters that will form the result string.
+ __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two-character string in the symbol table. If it is
+  // not found, just allocate a new one.
+ Label make_two_character_string, make_flat_ascii_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ __ Set(rbx, 2);
+ __ jmp(&make_flat_ascii_string);
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
+ __ j(below, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
+ __ j(above, &string_add_runtime);
+
+ // If result is not supposed to be flat, allocate a cons string object. If
+ // both strings are ascii the result is an ascii cons string.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ Label non_ascii, allocated, ascii_data;
+ __ movl(rcx, r8);
+ __ and_(rcx, r9);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ testl(rcx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ __ bind(&ascii_data);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+ __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ movq(rax, rcx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // rcx: first instance type AND second instance type.
+ // r8: first instance type.
+ // r9: second instance type.
+ __ testb(rcx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ xor_(r8, r9);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ j(equal, &ascii_data);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // rax: first string
+ // rbx: length of resulting flat string as smi
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ __ bind(&string_add_flat_result);
+ __ SmiToInteger32(rbx, rbx);
+ __ movl(rcx, r8);
+ __ and_(rcx, Immediate(kStringRepresentationMask));
+ __ cmpl(rcx, Immediate(kExternalStringTag));
+ __ j(equal, &string_add_runtime);
+ __ movl(rcx, r9);
+ __ and_(rcx, Immediate(kStringRepresentationMask));
+ __ cmpl(rcx, Immediate(kExternalStringTag));
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ Label non_ascii_string_add_flat_result;
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ testl(r8, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ testl(r9, Immediate(kAsciiStringTag));
+ __ j(zero, &string_add_runtime);
+
+ __ bind(&make_flat_ascii_string);
+ // Both strings are ascii strings. As they are short they are both flat.
+ __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
+ // rcx: result string
+ __ movq(rbx, rcx);
+ // Locate first character of result.
+ __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // rax: first char of first argument
+ // rbx: result string
+ // rcx: first character of result
+ // rdx: second string
+ // rdi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+ // Locate first character of second argument.
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // rbx: result string
+ // rcx: next character of result
+ // rdx: first char of second argument
+ // rdi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+ __ movq(rax, rbx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // rax: first string - known to be two byte
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ and_(r9, Immediate(kAsciiStringTag));
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
+ // rcx: result string
+ __ movq(rbx, rcx);
+ // Locate first character of result.
+ __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // rax: first char of first argument
+ // rbx: result string
+ // rcx: first character of result
+ // rdx: second argument
+ // rdi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+ // Locate first character of second argument.
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // rbx: result string
+ // rcx: next character of result
+ // rdx: first char of second argument
+ // rdi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+ __ movq(rax, rbx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+}
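+
+// In outline, the stub above implements roughly the following (an
+// illustrative C++-style sketch, not actual V8 code; helper names are
+// hypothetical):
+//   if (second.empty()) return first;
+//   if (first.empty()) return second;
+//   int total = first.length() + second.length();
+//   if (total == 2) return TwoCharFromSymbolTableOrFlat(c1, c2);
+//   if (total < String::kMinNonFlatLength) return AllocateFlatAndCopy();
+//   if (total > String::kMaxLength) goto runtime;
+//   return AllocateConsString(first, second);  // ascii or two-byte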
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ incq(src);
+ __ incq(dest);
+ } else {
+ __ movzxwl(kScratchRegister, Operand(src, 0));
+ __ movw(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(2));
+ __ addq(dest, Immediate(2));
+ }
+ __ decl(count);
+ __ j(not_zero, &loop);
+}
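+
+// For reference, the loop above is equivalent to this C-style sketch
+// (illustrative only; dest, src and count mirror the registers):
+//   do {
+//     *dest++ = *src++;  // one byte (ascii) or one uc16 (two-byte)
+//   } while (--count != 0);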
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ // Copy characters using rep movs of quadwords. Copy any remaining
+ // characters byte by byte after running rep movs.
+ // Count is a positive int32; dest and src are character pointers.
+ ASSERT(dest.is(rdi)); // rep movs destination
+ ASSERT(src.is(rsi)); // rep movs source
+ ASSERT(count.is(rcx)); // rep movs count
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ testl(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ STATIC_ASSERT(2 == sizeof(uc16));
+ __ addl(count, count);
+ }
+
+ // Don't enter the rep movs if there are fewer than 8 bytes to copy.
+ Label last_bytes;
+ __ testl(count, Immediate(~7));
+ __ j(zero, &last_bytes);
+
+ // Copy from rsi to rdi using the rep movs instruction.
+ __ movl(kScratchRegister, count);
+ __ shr(count, Immediate(3)); // Number of quadwords to copy.
+ __ repmovsq();
+
+ // Find number of bytes left.
+ __ movl(count, kScratchRegister);
+ __ and_(count, Immediate(7));
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ testl(count, count);
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ incq(src);
+ __ incq(dest);
+ __ decl(count);
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
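+
+// The strategy above corresponds roughly to this sketch (illustrative only;
+// the zero-count case is handled before it applies):
+//   int bytes = ascii ? count : count * sizeof(uc16);
+//   if (bytes >= 8) copy bytes / 8 quadwords with rep movsq;
+//   copy the remaining bytes % 8 bytes one at a time.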
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look these up in the symbol table.
+ Label not_array_index;
+ __ leal(scratch, Operand(c1, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ leal(scratch, Operand(c2, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, Immediate(kBitsPerByte));
+ __ orl(chars, c2);
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ SmiToInteger32(mask,
+ FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ decl(mask);
+
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string (32-bit int)
+ // symbol_table: symbol table
+ // mask: capacity mask (32-bit int)
+ // undefined: undefined value
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ movl(scratch, hash);
+ if (i > 0) {
+ __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ andl(scratch, mask);
+
+ // Load the entry from the symbol table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ movq(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmpq(candidate, undefined);
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
+ Smi::FromInt(2));
+ __ j(not_equal, &next_probe[i]);
+
+ // We use kScratchRegister as a temporary register on the assumption that
+ // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
+ Register temp = kScratchRegister;
+
+ // Check that the candidate is a non-external ascii string.
+ __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe[i]);
+
+ // Check if the two characters match.
+ __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ andl(temp, Immediate(0x0000ffff));
+ __ cmpl(chars, temp);
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains the result when control arrives here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ if (!result.is(rax)) {
+ __ movq(rax, result);
+ }
+}
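+
+// The probe sequence above amounts to open addressing over the symbol table
+// (hedged sketch; the table accessors are simplified):
+//   for (int i = 0; i < kProbes; i++) {
+//     int entry = (hash + SymbolTable::GetProbeOffset(i)) & mask;
+//     Object* candidate = table[entry];
+//     if (candidate == undefined) goto not_found;  // hash not present
+//     if (IsSeqAsciiString(candidate) && candidate->length() == 2 &&
+//         candidate's two characters equal chars) return candidate;
+//   }
+//   goto not_found;  // string may still be in the table; caller must cope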
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ movl(hash, character);
+ __ shll(hash, Immediate(10));
+ __ addl(hash, character);
+ // hash ^= hash >> 6;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(6));
+ __ xorl(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ addl(hash, character);
+ // hash += hash << 10;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(10));
+ __ addl(hash, scratch);
+ // hash ^= hash >> 6;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(6));
+ __ xorl(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ leal(hash, Operand(hash, hash, times_8, 0));
+ // hash ^= hash >> 11;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(11));
+ __ xorl(hash, scratch);
+ // hash += hash << 15;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(15));
+ __ addl(hash, scratch);
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ j(not_zero, &hash_not_zero);
+ __ movl(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
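+
+// Taken together, the three hash helpers above compute the following
+// (illustrative C sketch for a two-character string c0, c1):
+//   uint32_t hash = c0 + (c0 << 10); hash ^= hash >> 6;         // init
+//   hash += c1;  hash += hash << 10; hash ^= hash >> 6;         // add char
+//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;  // get hash
+//   if (hash == 0) hash = 27;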
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: to
+ // rsp[16]: from
+ // rsp[24]: string
+
+ const int kToOffset = 1 * kPointerSize;
+ const int kFromOffset = kToOffset + kPointerSize;
+ const int kStringOffset = kFromOffset + kPointerSize;
+ const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+
+ // Make sure first argument is a string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // Calculate length of sub string using the smi values.
+ Label result_longer_than_two;
+ __ movq(rcx, Operand(rsp, kToOffset));
+ __ movq(rdx, Operand(rsp, kFromOffset));
+ __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
+
+ __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
+ Label return_rax;
+ __ j(equal, &return_rax);
+ // Special handling of sub-strings of length 1 and 2. One-character strings
+ // are handled in the runtime system (looked up in the single character
+ // string cache). Two-character strings are looked up in the symbol table.
+ __ SmiToInteger32(rcx, rcx);
+ __ cmpl(rcx, Immediate(2));
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+
+ // Sub string of length 2 requested.
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length (value is 2)
+ // rdx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
+ __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx,
+ FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to look up the two-character string in the symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Set up registers for allocating the two-character string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ Set(rcx, 2);
+
+ __ bind(&result_longer_than_two);
+
+ // rax: string
+ // rbx: instance type
+ // rcx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi used by following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ __ bind(&non_ascii_flat);
+ // rax: string
+ // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
+ // rcx: result string length
+ // Check for sequential two byte string
+ __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi used by following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+ __ movq(rsi, rdx); // Restore rsi.
+
+ __ bind(&return_rax);
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
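+
+// The fast path above corresponds roughly to (illustrative sketch):
+//   if (!IsString(str) || from or to is not a positive smi) goto runtime;
+//   int len = to - from;
+//   if (len == str.length) return str;  // whole string requested
+//   if (len < 2) goto runtime;          // one char: runtime's character cache
+//   if (len == 2) probe the symbol table before allocating;
+//   allocate a flat result and copy len characters with rep movs.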
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Ensure that you can always subtract a string length from a non-negative
+ // number (e.g. another length).
+ STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+
+ // Find minimum length and length difference.
+ __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movq(scratch4, scratch1);
+ __ SmiSub(scratch4,
+ scratch4,
+ FieldOperand(right, String::kLengthOffset),
+ NULL);
+ // Register scratch4 now holds left.length - right.length.
+ const Register length_difference = scratch4;
+ Label left_shorter;
+ __ j(less, &left_shorter);
+ // The right string isn't longer than the left one.
+ // Get the right string's length by subtracting the (non-negative) difference
+ // from the left string's length.
+ __ SmiSub(scratch1, scratch1, length_difference, NULL);
+ __ bind(&left_shorter);
+ // Register scratch1 now holds Min(left.length, right.length).
+ const Register min_length = scratch1;
+
+ Label compare_lengths;
+ // If min-length is zero, go directly to comparing lengths.
+ __ SmiTest(min_length);
+ __ j(zero, &compare_lengths);
+
+ __ SmiToInteger32(min_length, min_length);
+
+ // Registers scratch2 and scratch3 are free.
+ Label result_not_equal;
+ Label loop;
+ {
+ // Check characters 0 .. min_length - 1 in a loop.
+ // Use scratch3 as loop index, min_length as limit and scratch2
+ // for computation.
+ const Register index = scratch3;
+ __ movl(index, Immediate(0)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ // TODO(lrn): Could we load more than one character at a time?
+ __ movb(scratch2, FieldOperand(left,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ // Increment index and use -1 modifier on next load to give
+ // the previous load extra time to complete.
+ __ addl(index, Immediate(1));
+ __ cmpb(scratch2, FieldOperand(right,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize - 1));
+ __ j(not_equal, &result_not_equal);
+ __ cmpl(index, min_length);
+ __ j(not_equal, &loop);
+ }
+ // Completed loop without finding different characters.
+ // Compare lengths (precomputed).
+ __ bind(&compare_lengths);
+ __ SmiTest(length_difference);
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ Label result_greater;
+ __ bind(&result_not_equal);
+ // Unequal comparison of left to right, either character or length.
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Move(rax, Smi::FromInt(LESS));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Move(rax, Smi::FromInt(GREATER));
+ __ ret(0);
+}
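+
+// Equivalent logic, roughly (illustrative C sketch):
+//   int min_len = min(left.length, right.length);
+//   for (int i = 0; i < min_len; i++) {
+//     if (left[i] != right[i])
+//       return left[i] < right[i] ? LESS : GREATER;
+//   }
+//   if (left.length == right.length) return EQUAL;
+//   return left.length < right.length ? LESS : GREATER;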
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: right string
+ // rsp[16]: left string
+
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+
+ // Check for identity.
+ Label not_same;
+ __ cmpq(rdx, rax);
+ __ j(not_equal, &not_same);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+
+ // Inline comparison of ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ // Drop arguments from the stack
+ __ pop(rcx);
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rcx);
+ GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
new file mode 100644
index 0000000000..18213b93e6
--- /dev/null
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -0,0 +1,389 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_CODE_STUBS_X64_H_
+#define V8_X64_CODE_STUBS_X64_H_
+
+#include "ic-inl.h"
+#include "type-info.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags,
+ TypeInfo operands_type = TypeInfo::Unknown())
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false),
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ static_operands_type_(TypeInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(type_info),
+ name_(NULL) {
+ }
+
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ bool ArgsInRegistersSupported() {
+ return (op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV);
+ }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers not on the stack.
+ bool args_reversed_; // Left and right argument are swapped.
+
+ // Number type information of operands, determined by code generator.
+ TypeInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_),
+ static_operands_type_.ToString());
+ }
+#endif
+
+ // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
+ class ArgsReversedBits: public BitField<bool, 10, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 12, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 17 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_)
+ | StaticTypeInfoBits::encode(
+ static_operands_type_.ThreeBitRepresentation())
+ | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
+
+ bool ShouldGenerateSmiCode() {
+ return HasSmiCodeInStub() &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ friend class CodeGenerator;
+};
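+
+// For illustration (hedged; enum values are not spelled out): an ADD stub in
+// NO_OVERWRITE mode with no flags would build its minor key roughly as
+//   OpBits::encode(Token::ADD) | ModeBits::encode(NO_OVERWRITE)
+//     | FlagBits::encode(NO_GENERIC_BINARY_FLAGS) | ...
+// so every distinct stub variant maps to a distinct 17-bit cache key.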
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be rdi.
+ Register src, // Must be rsi.
+ Register count, // Must be rcx.
+ bool ascii);
+
+
+ // Probe the symbol table for a two character string. If the string is not
+ // found by probing, a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found, the code falls through with the string in register rax.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ explicit StringCompareStub() {}
+
+ // Compares two flat ascii strings and returns the result in rax after
+ // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache, the generated code falls
+ // through with the result in the result register. The object and the result
+ // register can be the same. If the number is not found in the cache, the
+ // code jumps to the label not_found, leaving the contents of register
+ // object unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask);
+
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits. 4 bits for each of the three
+ // registers (object, address and scratch) OOOOAAAASSSS.
+ class ScratchBits : public BitField<uint32_t, 0, 4> {};
+ class AddressBits : public BitField<uint32_t, 4, 4> {};
+ class ObjectBits : public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
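+
+// Worked example (hedged; assumes the standard x64 register codes rax=0,
+// rcx=1, rbx=3): RecordWriteStub(rax, rbx, rcx) would encode
+//   MinorKey() == (0 << 8) | (3 << 4) | 1 == 0x031.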
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index e545ffa3dc..b1dd45e206 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -807,55 +808,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
}
-class FloatingPointHelper : public AllStatic {
- public:
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-};
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
// Call the specialized stub for a binary operation.
class DeferredInlineBinaryOperation: public DeferredCode {
public:
@@ -1072,7 +1024,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_SMI_CODE_IN_STUB,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
} else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
false, overwrite_mode);
@@ -1095,7 +1047,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_GENERIC_BINARY_FLAGS,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
}
}
@@ -2038,41 +1990,6 @@ void CodeGenerator::Comparison(AstNode* node,
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- dest->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- dest->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
} else if (left_side_constant_1_char_string ||
right_side_constant_1_char_string) {
if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
@@ -2616,8 +2533,10 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(is_smi, &build_args);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &build_args);
+ __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
+ __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rax, JSFunction::kCodeOffset), apply_code);
+ __ Cmp(rcx, apply_code);
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -4800,8 +4719,10 @@ void DeferredRegExpLiteral::Generate() {
class DeferredAllocateInNewSpace: public DeferredCode {
public:
- DeferredAllocateInNewSpace(int size, Register target)
- : size_(size), target_(target) {
+ DeferredAllocateInNewSpace(int size,
+ Register target,
+ int registers_to_save = 0)
+ : size_(size), target_(target), registers_to_save_(registers_to_save) {
ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
set_comment("[ DeferredAllocateInNewSpace");
}
@@ -4810,15 +4731,28 @@ class DeferredAllocateInNewSpace: public DeferredCode {
private:
int size_;
Register target_;
+ int registers_to_save_;
};
void DeferredAllocateInNewSpace::Generate() {
+ for (int i = 0; i < kNumRegs; i++) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ push(save_register);
+ }
+ }
__ Push(Smi::FromInt(size_));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
if (!target_.is(rax)) {
__ movq(target_, rax);
}
+ for (int i = kNumRegs - 1; i >= 0; i--) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ pop(save_register);
+ }
+ }
}
@@ -4989,12 +4923,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->depth() > 1) {
+ if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ clone = frame_->CallStub(&stub, 3);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ } else if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
clone = frame_->CallStub(&stub, 3);
}
frame_->Push(&clone);
@@ -5004,12 +4944,9 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i);
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+ if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
+ continue;
+ }
// The property must be set by generated code.
Load(value);
@@ -5072,12 +5009,9 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5164,12 +5098,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame()->Push(&value);
Load(node->value());
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5267,11 +5198,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5674,11 +5602,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -5691,8 +5618,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// constructor invocation.
CodeForSourcePosition(node->position());
Result result = frame_->CallConstructor(arg_count);
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
@@ -6603,6 +6529,86 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT_EQ(1, args->length());
+
+ Load(args->at(0));
+ Result object_result = frame_->Pop();
+ object_result.ToRegister(rax);
+ object_result.Unuse();
+ {
+ VirtualFrame::SpilledScope spilled_scope;
+
+ Label done;
+ __ JumpIfSmi(rax, &done);
+
+ // Load JSRegExpResult map into rdx.
+ // Arguments to this function should be results of calling RegExp exec,
+ // each of which is either an unmodified JSRegExpResult or null. Anything
+ // not having the unmodified JSRegExpResult map is returned unmodified.
+ // This also ensures that elements are fast.
+
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ cmpq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &done);
+
+ if (FLAG_debug_code) {
+ // Check that object really has empty properties array, as the map
+ // should guarantee.
+ __ CompareRoot(FieldOperand(rax, JSObject::kPropertiesOffset),
+ Heap::kEmptyFixedArrayRootIndex);
+ __ Check(equal, "JSRegExpResult: default map but non-empty properties.");
+ }
+
+ DeferredAllocateInNewSpace* allocate_fallback =
+ new DeferredAllocateInNewSpace(JSRegExpResult::kSize,
+ rbx,
+ rdx.bit() | rax.bit());
+
+ // All set, copy the contents to a new object.
+ __ AllocateInNewSpace(JSRegExpResult::kSize,
+ rbx,
+ no_reg,
+ no_reg,
+ allocate_fallback->entry_label(),
+ TAG_OBJECT);
+ __ bind(allocate_fallback->exit_label());
+
+ STATIC_ASSERT(JSRegExpResult::kSize % (2 * kPointerSize) == 0);
+ // There is an even number of fields, so unroll the loop once
+ // for efficiency.
+ for (int i = 0; i < JSRegExpResult::kSize; i += 2 * kPointerSize) {
+ STATIC_ASSERT(JSObject::kMapOffset % (2 * kPointerSize) == 0);
+ if (i != JSObject::kMapOffset) {
+ // The map was already loaded into rdx.
+ __ movq(rdx, FieldOperand(rax, i));
+ }
+ __ movq(rcx, FieldOperand(rax, i + kPointerSize));
+
+ STATIC_ASSERT(JSObject::kElementsOffset % (2 * kPointerSize) == 0);
+ if (i == JSObject::kElementsOffset) {
+ // If the elements array isn't empty, make it copy-on-write
+ // before copying it.
+ Label empty;
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ __ j(equal, &empty);
+ __ LoadRoot(kScratchRegister, Heap::kFixedCOWArrayMapRootIndex);
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), kScratchRegister);
+ __ bind(&empty);
+ }
+ __ movq(FieldOperand(rbx, i), rdx);
+ __ movq(FieldOperand(rbx, i + kPointerSize), rcx);
+ }
+ __ movq(rax, rbx);
+
+ __ bind(&done);
+ }
+ frame_->Push(rax);
+}
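+
+// In outline (illustrative sketch; accessor names are approximations):
+//   if (!IsSmi(obj) && obj->map() == global_context->regexp_result_map()) {
+//     if (obj->elements() != empty_fixed_array)
+//       obj->elements()->set_map(fixed_cow_array_map);  // share via COW
+//     obj = ShallowCopyFields(obj);  // hypothetical helper for the field copy
+//   }
+//   return obj;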
+
+
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst,
@@ -6875,7 +6881,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(not_zero);
- // Check the object's elements are in fast case.
+ // Check the object's elements are in fast case and writable.
__ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
@@ -7217,6 +7223,34 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result string = frame_->Pop();
+ string.ToRegister();
+
+ Result number = allocator()->Allocate();
+ ASSERT(number.is_valid());
+ __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
+ __ IndexFromHash(number.reg(), number.reg());
+ string.Unuse();
+ frame_->Push(&number);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -7345,9 +7379,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
} else {
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
@@ -7765,11 +7797,9 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ if (node->left()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ } else if (node->right()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_RIGHT;
}
@@ -7955,6 +7985,40 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
+ if (node->is_strict()) {
+ operand.Unuse();
+ destination()->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ destination()->true_target()->Branch(equal);
+ __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
+ destination()->true_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ destination()->false_target()->Branch(is_smi);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ destination()->Split(not_zero);
+ }
+}
+
+
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
@@ -8419,15 +8483,10 @@ Result CodeGenerator::EmitKeyedLoad() {
// Check that the key is a non-negative smi.
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
+ // Get the elements array from the receiver.
__ movq(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ Assert(equal, "JSObject with fast elements map has slow elements");
- }
+ __ AssertFastElements(elements.reg());
// Check that key is within bounds.
__ SmiCompare(key.reg(),
@@ -8730,3921 +8789,17 @@ void Reference::SetValue(InitState init_state) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ movq(FieldOperand(rax, JSFunction::kCodeOffset), rdx);
-
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Setup the fixed slots.
- __ xor_(rbx, rbx); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the surrounding context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
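The allocation size in FastNewContextStub is plain pointer arithmetic over the slot count. A standalone sketch of that computation (the constants are illustrative stand-ins for the V8 values, with kPointerSize = 8 on x64):

static const int kPointerSize = 8;      // x64
static const int kMinContextSlots = 5;  // stand-in for Context::MIN_CONTEXT_SLOTS
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length words

// Mirrors the size passed to AllocateInNewSpace above.
int ContextAllocationSize(int slots) {
  int length = slots + kMinContextSlots;
  return length * kPointerSize + kFixedArrayHeaderSize;
}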
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
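The point of the single AllocateInNewSpace call above is that the JSArray header and its elements array are sized together, so only one limit check is paid. A sketch of that size computation, assuming FixedArray::SizeFor is header plus one word per element (constants are stand-ins):

static const int kPointerSize = 8;                          // x64
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
static const int kJSArraySize = 4 * kPointerSize;           // illustrative

int FixedArraySizeFor(int length) {
  return kFixedArrayHeaderSize + length * kPointerSize;
}

// Combined size for the boilerplate clone: one allocation, one limit check.
int CloneAllocationSize(int length) {
  int elements_size = (length > 0) ? FixedArraySizeFor(length) : 0;
  return kJSArraySize + elements_size;
}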
-
-
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // 'null' => false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
- __ j(above_equal, &not_string);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result);
- // HeapNumber => false iff +0, -0, or NaN.
- // These three cases set the zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in rax.
- __ bind(&true_result);
- __ movq(rax, Immediate(1));
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ xor_(rax, rax);
- __ ret(1 * kPointerSize);
-}
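The control flow above is easier to see as a plain decision procedure. A host-level sketch with a hypothetical tagged-value type; the order of the checks mirrors the stub:

#include <cmath>
#include <string>

enum class Kind { kNull, kUndetectable, kJSObject, kString, kHeapNumber };

struct Value {
  Kind kind;
  std::string string_value;  // valid when kind == kString
  double number_value;       // valid when kind == kHeapNumber
};

bool ToBoolean(const Value& v) {
  if (v.kind == Kind::kNull) return false;          // 'null' => false
  if (v.kind == Kind::kUndetectable) return false;  // undetectable => false
  if (v.kind == Kind::kJSObject) return true;       // JS object => true
  if (v.kind == Kind::kString) return !v.string_value.empty();
  // HeapNumber => false iff +0, -0, or NaN; these are exactly the cases
  // where ucomisd against zero sets the zero flag.
  double d = v.number_value;
  return !(d == 0.0 || std::isnan(d));
}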
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves is important to avoid destroying the left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves is important to avoid destroying the right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
-
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
+Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right) {
+ if (stub->ArgsInRegistersSupported()) {
+ stub->SetArgsInRegisters();
+ return frame_->CallStub(stub, left, right);
} else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
- }
-
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
-
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
- break;
-
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
-
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
- }
-
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
-
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- break;
-
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
- break;
-
- default:
- break;
- }
-}
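The smi fast path above relies on each Smi* macro branching out on overflow, after which the stub redoes the operation in double precision. A sketch of that pattern for ADD, using plain 32-bit integers in place of tagged smis (kSmiValueSize is 32 on x64):

#include <cstdint>

// Returns false when the sum leaves the 32-bit smi value range, mirroring
// the jump to the use_fp_on_smis label above.
bool SmiAdd(int32_t left, int32_t right, int32_t* result) {
  int64_t wide = static_cast<int64_t>(left) + right;
  if (wide < INT32_MIN || wide > INT32_MAX) return false;  // overflow
  *result = static_cast<int32_t>(wide);
  return true;
}

double AddWithFallback(int32_t left, int32_t right) {
  int32_t smi_result;
  if (SmiAdd(left, right, &smi_result)) return smi_result;
  // Overflow: redo the operation on doubles, as the stub does with addsd
  // after LoadSSE2SmiOperands.
  return static_cast<double>(left) + static_cast<double>(right);
}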
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
-
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
-
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
- // result. If arguments were passed in registers, place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
-
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
-
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- Condition is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
-
- // First argument is a string, test second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
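For ADD, the runtime fallback above is mostly a dispatch on which operands are strings. A condensed sketch of that dispatch (the number-string-cache path for a smi right operand is folded into kStringAdd for brevity; on a cache miss the stub falls back to STRING_ADD_LEFT):

enum class AddPath {
  kStringAdd,       // both operands (possibly after number-to-string) strings
  kStringAddLeft,   // only the left operand is a string
  kStringAddRight,  // only the right operand is a string
  kGenericAdd       // neither is a string: Builtins::ADD
};

AddPath ClassifyAdd(bool lhs_is_string, bool rhs_is_string, bool rhs_is_smi) {
  if (lhs_is_string) {
    // A smi right operand is first looked up in the number string cache;
    // on a hit the stub tail-calls the string add stub.
    if (rhs_is_string || rhs_is_smi) return AddPath::kStringAdd;
    return AddPath::kStringAddLeft;
  }
  if (rhs_is_string) return AddPath::kStringAddRight;
  return AddPath::kGenericAdd;
}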
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers, remove them from the stack
- // before returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
- } else {
- __ push(rdx);
- __ push(rax);
- }
- __ push(rcx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
-
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
-
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
-
- __ Push(Smi::FromInt(runtime_operands_type_));
-
- __ push(rcx); // The return address.
-
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
- // ST[0] == double value
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- __ movq(rax, ExternalReference::transcendental_cache_array_address());
- // rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements matches expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint32_t's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
-
- __ bind(&cache_miss);
- // Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
-}
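The hash described in the comments above can be written out directly over the raw bits of the input double. A standalone sketch, assuming kCacheSize is 512 and that >> on a negative int32_t is an arithmetic shift (which matches the stub's sarl and holds on mainstream compilers):

#include <cstdint>
#include <cstring>

static const uint32_t kCacheSize = 512;  // assumed; must be a power of two

uint32_t TranscendentalHash(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // raw bits of the double
  int32_t h = static_cast<int32_t>(bits ^ (bits >> 32));
  // Same mix as the stub: sarl by 8, 16 and 24, XORed together.
  h = h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24);
  return static_cast<uint32_t>(h) & (kCacheSize - 1);
}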
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
- // Registers:
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
-
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If the argument is outside the range -2^63..2^63, fsin/fcos do not
- // work, so we must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
-
- // Use fprem1 to restrict the argument to the range +/-2*PI.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
-}
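In host terms, the operation above is: use fsin/fcos directly when the unbiased exponent is below 63, return NaN for infinities and NaN, and otherwise reduce the argument modulo 2*pi first. A sketch under those assumptions, with std::remainder playing the role of fprem1:

#include <cmath>
#include <cstdint>
#include <cstring>

double CachedSin(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent >= 63) {
    if (exponent == 1024) return NAN;  // biased exponent 0x7ff: inf or NaN
    static const double kTwoPi = 6.283185307179586;
    x = std::remainder(x, kTwoPi);     // range reduction, like fprem1
  }
  return std::sin(x);
}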
-
-
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double the value to remove the sign bit, shift the exponent down to the
- // least significant bits, and subtract the bias to get the unshifted,
- // unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If the exponent is negative or above 83, the number contains no
- // significant bits in the range 0..2^31, so the result is zero, and the
- // result register already holds zero.
- __ j(above, &done);
-
- // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // If the value was negative, this computes (double_value - 1) ^ -1;
- // otherwise (double_value - 0) ^ 0, i.e. a two's complement negation of
- // the low bits.
- __ addl(double_value, result);
- // Do the xor in opposite directions depending on whether the result
- // register is rcx, since rcx is also needed as the shift count below.
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
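The same conversion can be written against the raw double bits on the host. A sketch of IntegerConvert's three cases (fast truncation, no bits below 2^32, and the 63..83 exponent window), matching ECMA-262 ToInt32 behaviour for the bit operations:

#include <cstdint>
#include <cstring>

int32_t IntegerConvertSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 63) {
    // Fast path: |value| < 2^63, same as the cvttsd2siq truncation
    // (exponents below zero truncate to 0).
    return static_cast<int32_t>(static_cast<int64_t>(value));
  }
  if (exponent > 83) return 0;  // no significant bits below 2^32 (also inf/NaN)
  // Exponent in 63..83: keep the mantissa bits with positional value
  // below 2^32, then reapply the sign.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  if (bits >> 63) low = 0u - low;  // negate the low bits for negative input
  return static_cast<int32_t>(low);
}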
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rdx.
- IntegerConvert(masm, rdx, rdx);
-
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
-
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
-
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
-
- // Either zero or Smi::kMinValue, neither of which becomes a smi when
- // negated.
- if (negative_zero_ == kStrictNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
- } else {
- __ jmp(&slow);
- }
-
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
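The SUB path above negates a heap number without any floating-point arithmetic: it XORs the sign bit of the raw 64-bit value, which also behaves correctly for -0 and NaN. A host sketch:

#include <cstdint>
#include <cstring>

double NegateBySignFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // flip the sign bit, as the stub does in rdx
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}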
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
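Two details above are worth spelling out: the bounds check is unsigned, so a negative smi key is rejected by the same comparison, and the argument is then read at a negative index from the last parameter. A simplified host model (a flat array stands in for the stack-relative addressing):

#include <cstdint>

// parameters[0..parameter_count-1] holds the arguments, last pushed first.
// Returns false for the slow case (out of bounds or negative key).
bool ReadElement(const intptr_t* parameters, uint32_t parameter_count,
                 int32_t key, intptr_t* out) {
  // Unsigned comparison: a negative key becomes huge and fails the check,
  // exactly like the cmpq/j(above_equal) pair above.
  if (static_cast<uint32_t>(key) >= parameter_count) return false;
  *out = parameters[parameter_count - 1 - key];  // negative indexing
  return true;
}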
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used to compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi, offset));
-
- // Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ SmiTest(rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack and untag the length.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
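The try_allocate block sizes the arguments object and its elements array as one block, like the array-clone stub earlier. A sketch of that computation (Heap::kArgumentsObjectSize is a stand-in value here):

static const int kPointerSize = 8;                          // x64
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
static const int kArgumentsObjectSize = 4 * kPointerSize;   // stand-in value

int ArgumentsAllocationSize(int argument_count) {
  int size = 0;
  if (argument_count > 0) {
    // Elements fixed array: header plus one word per argument.
    size = kFixedArrayHeaderSize + argument_count * kPointerSize;
  }
  return size + kArgumentsObjectSize;  // the arguments object itself
}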
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Jump straight to the runtime system if native RegExp is not selected
- // at compile time, or if the regexp entry in generated code has been
- // turned off by the runtime switch.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: last_match_info (expected JSArray)
- // rsp[16]: previous index
- // rsp[24]: subject string
- // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ movq(kScratchRegister, Operand(kScratchRegister, 0));
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rcx);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // rcx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
- __ j(above, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rax, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: Subject string.
- // rcx: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
- __ j(above_equal, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
- __ j(greater, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
- __ Cmp(rdx, Factory::empty_string());
- __ j(not_equal, &runtime);
- __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // rax: first part of cons string.
- // rbx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // rax: subject string (sequential ascii)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rdi, 1); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // rax: subject string (flat two-byte)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
- __ Set(rdi, 0); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains the hole.
- __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
-
- // rax: subject string
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for the call to
- // native RegExp code, to avoid handling a changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
-
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
-
- // rsi is caller save on Windows and used to pass parameter on Linux.
- __ push(rsi);
-
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments);
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
-#endif
-
- // Argument 5: static offsets vector buffer.
- __ movq(r8, ExternalReference::address_of_static_offsets_vector());
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
-#endif
-
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track of aliasing between argX defined above and the registers used.
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ testb(rdi, rdi);
- __ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
- __ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
-
- __ bind(&setup_rest);
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
-
- // Argument 1: Subject string.
- __ movq(arg1, rax);
-
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r11, kRegExpExecuteArguments);
-
- // rsi is caller save, as it is used to pass parameter.
- __ pop(rsi);
-
- // Check the result.
- Label success;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
- Label failure;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- __ j(equal, &failure);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- // If not EXCEPTION, it can only be RETRY. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // The result must now be EXCEPTION. If there is no pending exception, a
- // stack overflow (on the backtrack stack) was detected in the RegExp code,
- // but the exception has not been created yet. Handle that in the runtime
- // system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ Cmp(kScratchRegister, Factory::the_hole_value());
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ Move(rax, Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
-
- // rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
-
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
-
- // Get the static offsets vector filled by the native regexp code.
- __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
-
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- Label next_capture, done;
- // The capture register counter starts from the number of capture registers
- // and counts down until wrapping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi, &runtime);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
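
The stub above marshals seven arguments across two different C calling
conventions. As a rough sketch only (the real callee is generated irregexp
code, not a C function, and these names are hypothetical), the C-level view
of the argument list is:

  // Hypothetical signature mirroring arguments 1-7 prepared by the stub.
  // On System V the first six land in registers; on Win64 only the first
  // four do, and the rest go in stack slots, matching the #ifdefs above.
  extern "C" int RegExpExecute(const void* subject,        // argument 1
                               int previous_index,         // argument 2
                               const char* input_start,    // argument 3
                               const char* input_end,      // argument 4
                               int* offsets_vector,        // argument 5
                               const void* stack_area_high,// argument 6
                               int direct_call);           // argument 7
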
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
-}
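
The lookup above mirrors Heap::GetNumberStringCache: smis hash to their own
value, and doubles hash to the xor of their two 32-bit halves, masked by the
cache size. A minimal C++ sketch of the double case (function name is
illustrative):

  #include <cstdint>
  #include <cstring>

  // Hash a double the way the stub does: xor the upper and lower words.
  uint32_t NumberStringCacheHash(double value, uint32_t mask) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);  // the movl/xor_ pair above
    return (static_cast<uint32_t>(bits) ^
            static_cast<uint32_t>(bits >> 32)) & mask;
  }
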
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in the string cache consists of two pointer-sized fields, but
- // the times_twice_pointer_size scale factor (multiplication by 16) is not
- // supported by x64 addressing modes, so the entry index has to be
- // premultiplied before the lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
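
Each cache entry occupies two pointers (16 bytes on x64) and x64 addressing
modes scale by at most 8, hence the explicit shift. The equivalent byte-offset
arithmetic, assuming kPointerSizeLog2 == 3:

  #include <cstdint>

  // Byte offset of a cache entry: index * 2 pointers * 8 bytes per pointer.
  uint32_t EntryByteOffset(uint32_t hash, uint32_t mask) {
    return (hash & mask) << (3 + 1);  // and_(hash, mask); shl(hash, 4)
  }
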
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to the result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- Label not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
- __ Set(rax, NegativeComparisonResult(cc_));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second-best thing: test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
-
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, return equal for the (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
- }
-
- __ bind(&not_identical);
- }
-
- if (cc_ == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
-
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, &not_smis);
-
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&not_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal, since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
- // Return non-zero: eax (the low 32 bits of rax) is not zero.
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands are
- // symbols they aren't equal. Register eax (the low 32 bits of rax) already
- // holds a non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
-
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in rax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
- }
-
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
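
The branch-free -1/0/1 computation in the number-comparison path (two setcc
instructions followed by a subtraction) is the classic three-way compare; in
C++ it reduces to the sketch below, with the NaN case handled separately
above via the parity flag:

  // Returns 1, 0 or -1, matching setcc(above)/setcc(below)/subq.
  int ThreeWayCompare(double x, double y) {
    return (x > y) - (x < y);
  }
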
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(rax);
- __ Push(Smi::FromInt(0));
- __ push(rax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that the stack contains the next handler, frame pointer, state and
- // return address, in that order.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
-
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- // Get the next handler in the chain.
- __ pop(rcx);
- __ movq(Operand(kScratchRegister, 0), rcx);
- __ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ xor_(rsi, rsi); // tentatively set context pointer to NULL
- Label skip;
- __ cmpq(rbp, Immediate(0));
- __ j(equal, &skip);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
- __ ret(0);
-}
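
The STATIC_ASSERTs above pin down the relative layout of a stack handler. As
a hedged sketch (field names are illustrative, not the real
StackHandlerConstants), the frame popped by this code looks like:

  #include <cstdint>

  // Layout of one stack handler as consumed by GenerateThrowTOS: the next
  // pointer is stored back, fp and state are popped, and ret consumes pc.
  struct StackHandlerSketch {
    void*    next;   // next handler in the chain (popped into rcx)
    void*    fp;     // saved frame pointer (popped into rbp)
    intptr_t state;  // handler state (popped into rdx, then ignored)
    void*    pc;     // return address, consumed by the final ret(0)
  };
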
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- Label empty_result;
- Label prologue;
- Label promote_scheduled_exception;
- __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, 0);
- ASSERT_EQ(kArgc, 4);
-#ifdef _WIN64
- // All the parameters should be set up by the caller.
-#else
- // Set the 1st parameter register with the property name.
- __ movq(rsi, rdx);
- // The second parameter register, rdi, should be set with the pointer to
- // the AccessorInfo by the caller.
-#endif
- // Call the api function!
- __ movq(rax,
- reinterpret_cast<int64_t>(fun()->address()),
- RelocInfo::RUNTIME_ENTRY);
- __ call(rax);
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
- __ movq(rsi, scheduled_exception_address);
- __ Cmp(Operand(rsi, 0), Factory::the_hole_value());
- __ j(not_equal, &promote_scheduled_exception);
-#ifdef _WIN64
- // rax keeps a pointer to v8::Handle, unpack it.
- __ movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- __ testq(rax, rax);
- __ j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- __ movq(rax, Operand(rax, 0));
- __ bind(&prologue);
- __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
- __ ret(0);
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- __ bind(&empty_result);
- // It was zero; the result is undefined.
- __ Move(rax, Factory::undefined_value());
- __ jmp(&prologue);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results are returned in rax (both AMD64 and Win64 calling
- // conventions). Complex results must be written to the address passed as
- // the first argument. AMD64 calling convention: a struct of two pointers
- // is returned in rax and rdx.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass the failure code returned from the last attempt as the first
- // argument to PerformGC. No need to use PrepareCallCFunction/CallCFunction
- // here as the stack is known to be aligned. This function takes one
- // argument, which is passed in a register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ incl(Operand(kScratchRegister, 0));
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, Operand(rsp, 4 * kPointerSize));
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, Operand(rsp, 6 * kPointerSize));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, Operand(rsp, 4 * kPointerSize));
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r12); // argv.
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
-
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ decl(Operand(kScratchRegister, 0));
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_, result_size_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
-
- // Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ movq(rax, Operand(kScratchRegister, 0));
- __ movq(rdx, ExternalReference::the_hole_value_location());
- __ movq(rdx, Operand(rdx, 0));
- __ movq(Operand(kScratchRegister, 0), rdx);
-
- // Special handling of termination exceptions, which are uncatchable by
- // JavaScript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
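
The failure check above relies on failure objects having both low tag bits
set, so adding one clears exactly those bits. A worked C++ equivalent of the
lea/testl pair, assuming kFailureTagMask == 3 as implied by the
STATIC_ASSERT:

  #include <cstdint>

  // True iff `result` carries the failure tag (low two bits set).
  bool IsFailureTagged(uintptr_t result) {
    const uintptr_t kFailureTagMask = 3;  // assumed, per the assert above
    return ((result + 1) & kFailureTagMask) == 0;
  }
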
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ movq(rsp, Operand(rsp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to the next handler past the current ENTRY
- // handler.
- __ movq(kScratchRegister, handler_address);
- __ pop(Operand(kScratchRegister, 0));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ movq(rax, Immediate(false));
- __ store_rax(external_caught);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ store_rax(pending_exception);
- }
-
- // Clear the context pointer.
- __ xor_(rsi, rsi);
-
- // Restore registers from handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
- __ pop(rbp); // FP
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- __ pop(rdx); // State
-
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
- __ ret(0);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, result_size_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
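
The three GenerateCore calls implement a fixed retry policy in generated
code: first try without collecting, retry after a space-specific GC, then
retry one last time after a full GC with an always-allocate scope. A sketch
of the same policy in C++ (names and signatures are illustrative, not the
V8 runtime API):

  #include <functional>

  template <typename Result>
  Result RunWithGCRetries(std::function<Result()> call,
                          std::function<bool(const Result&)> failed,
                          std::function<void(bool full)> collect) {
    Result r = call();                 // 1st attempt: no GC
    if (!failed(r)) return r;
    collect(false);                    // space-specific GC
    r = call();                        // 2nd attempt
    if (!failed(r)) return r;
    collect(true);                     // full GC, always-allocate scope
    return call();                     // final attempt
  }
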
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
- // callee-save as well.
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ load_rax(c_entry_fp);
- __ push(rax);
-
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
- __ InitializeSmiConstantRegister();
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ load_rax(js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, &not_outermost_js);
- __ movq(rax, rbp);
- __ store_rax(js_entry_sp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ store_rax(pending_exception);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ load_rax(ExternalReference::the_hole_value_location());
- __ store_rax(pending_exception);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ load_rax(construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ load_rax(entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
-
- // Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- __ pop(Operand(kScratchRegister, 0));
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If the current rbp value is the same as the js_entry_sp value, the
- // current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
- __ j(not_equal, &not_outermost_js_2);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
- __ pop(Operand(kScratchRegister, 0));
-
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Returns a bitwise zero to indicate that the value is an instance of the
- // function, and anything else to indicate that it is not.
-
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand side is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // rdx is function, rax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
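
Aside from the root-backed cache, the loop above is a plain prototype-chain
walk. A simplified C++ sketch of the same logic (real prototypes are heap
objects reached through maps):

  struct Proto { const Proto* prototype; };  // illustrative stand-in

  // Mirrors the loop: compare against the function prototype (rbx) until
  // we either match or fall off the chain at null (kScratchRegister).
  bool IsInstance(const Proto* object_proto, const Proto* function_proto) {
    for (const Proto* p = object_proto; p != nullptr; p = p->prototype) {
      if (p == function_proto) return true;   // is_instance
    }
    return false;                             // is_not_instance
  }
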
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs, the never-NaN-NaN condition is only taken into account if the
- // condition is equal.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
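
MinorKey packs the stub parameters into disjoint bit fields. The exact widths
come from the BitField declarations in code-stubs.h; assuming, for
illustration only, one bit per flag with the condition in the high bits, the
packing is equivalent to:

  #include <cstdint>

  // Field positions here are assumptions for illustration, not the real
  // BitField layout from code-stubs.h.
  uint32_t EncodeMinorKeySketch(unsigned cc, bool strict, bool never_nan_nan,
                                bool include_number_compare) {
    return (cc << 4)
         | (0u << 3)  // register field: lhs_/rhs_ unused on x64
         | (static_cast<uint32_t>(strict) << 2)
         | (static_cast<uint32_t>(never_nan_nan) << 1)
         |  static_cast<uint32_t>(include_number_compare);
  }
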
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi, trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string, trigger the non-string case.
- __ testb(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is not a smi, trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ movq(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ testb(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if this is
- // really a flat string in a cons string). If that is not the case, we would
- // rather go to the runtime system now to flatten the string.
- __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ testb(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxwl(result_, FieldOperand(object_,
- scratch_, times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxbl(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ Integer32ToSmi(result_, result_);
- __ bind(&exit_);
-}
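
The cons-string handling above hinges on one observation: a cons string whose
second component is the empty string is effectively flat, so only that shape
stays on the fast path. As a trivial sketch (the struct is a stand-in, not
the real ConsString layout):

  struct ConsSketch { const void* first; const void* second; };

  // Anything else is flattened in the runtime before reading a character.
  bool FirstComponentUsableDirectly(const ConsSketch& s,
                                    const void* empty_string) {
    return s.second == empty_string;
  }
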
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(rax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ movq(scratch_, rax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call the runtime. We get here when the receiver is a string and the
- // index is a number, but getting the actual character is too complex
- // (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
- __ j(above, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
-
- // Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &string_add_runtime);
-
- // First argument is a string; test the second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings is empty. In that case, return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in rax.
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string if string check was performed above
- // r9: map of second string if string check was performed above
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If the arguments were known to be strings, the maps were not loaded into
- // r8 and r9 by the code above.
- if (!string_check_) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx, NULL);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
-
- // Get the two characters forming the new string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
-
- // Try to look up the two-character string in the symbol table. If it is
- // not found, just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rbx, 2);
- __ jmp(&make_flat_ascii_string);
-
- __ bind(&longer_than_two);
- // Check if the resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &string_add_runtime);
-
- // If the result is not supposed to be flat, allocate a cons string object.
- // If both strings are ascii, the result is an ascii cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(rcx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ascii cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ movq(rax, rcx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that neither string is an
- // external string.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- __ bind(&string_add_flat_result);
- __ SmiToInteger32(rbx, rbx);
- __ movl(rcx, r8);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- __ movl(rcx, r9);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(r8, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii_string_add_flat_result);
- __ testl(r9, Immediate(kAsciiStringTag));
- __ j(zero, &string_add_runtime);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second string
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
- __ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // rax: first string - known to be two byte
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- __ bind(&non_ascii_string_add_flat_result);
- __ and_(r9, Immediate(kAsciiStringTag));
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second argument
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
- __ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
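
The stub chooses between four strategies purely on the combined length. A
sketch of the decision tree (the thresholds mirror Smi::FromInt(2),
String::kMinNonFlatLength and String::kMaxLength used above):

  enum class AddStrategy { kSymbolTable, kFlat, kCons, kRuntime };

  AddStrategy ChooseAddStrategy(int length, int min_non_flat, int max_length) {
    if (length == 2)           return AddStrategy::kSymbolTable;
    if (length < min_non_flat) return AddStrategy::kFlat;
    if (length > max_length)   return AddStrategy::kRuntime;
    return AddStrategy::kCons;
  }
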
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of quadwords, then copy any remaining
- // bytes one at a time after the rep movs has run.
- // Count is a positive int32; dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
- // Nothing to do for zero characters.
- Label done;
- __ testl(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- STATIC_ASSERT(2 == sizeof(uc16));
- __ addl(count, count);
- }
-
- // Don't enter the rep movs if there are fewer than 8 bytes to copy.
- Label last_bytes;
- __ testl(count, Immediate(~7));
- __ j(zero, &last_bytes);
-
- // Copy from rsi to rdi using the rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of doublewords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- __ decl(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
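The bulk-copy strategy above corresponds roughly to this C++ sketch, under the same assumptions (non-overlapping regions, count a positive int32); the rep movsq is modeled as an 8-bytes-at-a-time loop:

    #include <cstdint>
    #include <cstring>

    void CopyCharactersRep(uint8_t* dest, const uint8_t* src,
                           uint32_t count, bool ascii) {
      if (count == 0) return;                       // nothing to do
      uint32_t bytes = ascii ? count : count * 2;   // sizeof(uc16) == 2
      for (uint32_t i = 0; i < (bytes >> 3); i++) { // the 'rep movsq' part
        std::memcpy(dest, src, 8);
        dest += 8;
        src += 8;
      }
      for (uint32_t i = 0; i < (bytes & 7); i++) {  // remaining tail bytes
        *dest++ = *src++;
      }
    }
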
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ decl(mask);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // symbol_table: symbol table
- // mask: capacity mask (32-bit int)
- // undefined: undefined value
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- __ cmpq(candidate, undefined);
- __ j(equal, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register on the assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ascii string.
- __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
-
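A self-contained toy model of the probe loop may make the flow easier to follow: an open-addressed table whose capacity is a power of two, probed a fixed number of times. V8's quadratic probe offsets are simplified to linear ones here, and all names are illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    static const int kProbes = 4;

    const std::string* ProbeTwoCharString(
        const std::vector<const std::string*>& table,
        uint32_t hash, char c1, char c2) {
      uint32_t mask = table.size() - 1;        // capacity is a power of two
      for (int i = 0; i < kProbes; i++) {
        uint32_t index = (hash + i) & mask;    // probe offset, simplified
        const std::string* candidate = table[index];
        if (candidate == NULL) return NULL;    // empty slot: definitely absent
        if (candidate->size() != 2) continue;  // wrong length: next probe
        if ((*candidate)[0] == c1 && (*candidate)[1] == c2) return candidate;
      }
      // Not found within kProbes probes; the string may still be in the
      // table, which is why the stub's not_found label is only a hint.
      return NULL;
    }

The masked 16-bit compare at the end of the real loop works because a sequential ascii string's first two characters occupy the low two bytes of its first data word on a little-endian machine, matching the chars register's c1 | (c2 << 8) packing.
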
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ movl(hash, character);
- __ shll(hash, Immediate(10));
- __ addl(hash, character);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ addl(hash, character);
- // hash += hash << 10;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ leal(hash, Operand(hash, hash, times_8, 0));
- // hash ^= hash >> 11;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(11));
- __ xorl(hash, scratch);
- // hash += hash << 15;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(15));
- __ addl(hash, scratch);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero);
- __ movl(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
-
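Taken together, the three helpers above compute V8's running string hash. The same pipeline in plain C++ (a sketch; it uses unsigned shifts where the stub emits arithmetic ones, which only differ once the sign bit of an intermediate hash is set):

    #include <cstdint>

    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character + (character << 10);
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;             // the leal with times_8 above
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash != 0 ? hash : 27;  // zero is reserved, as in the stub
    }

For the two-character probe the calls chain exactly as in GenerateTwoCharacterSymbolTableProbe: HashGetHash(HashAddCharacter(HashInit(c1), c2)).
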
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
-
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
-
- // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
- __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
- __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
- Label return_rax;
- __ j(equal, &return_rax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked up in the symbol table.
- __ SmiToInteger32(rcx, rcx);
- __ cmpl(rcx, Immediate(2));
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // rax: string
- // rbx: instance type
- // rcx: sub string length (value is 2)
- // rdx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
- __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx,
- FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ movq(rax, Operand(rsp, kStringOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ Set(rcx, 2);
-
- __ bind(&result_longer_than_two);
-
- // rax: string
- // rbx: instance type
- // rcx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // rsi is used by the following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(kArgumentsSize);
-
- __ bind(&non_ascii_flat);
- // rax: string
- // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
- // rcx: result string length
- // Check for sequential two byte string
- __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // rsi is used by the following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, rdx); // Restore rsi.
-
- __ bind(&return_rax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(kArgumentsSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Ensure that you can always subtract a string length from a non-negative
- // number (e.g. another length).
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
-
- // Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
- __ SmiSub(scratch4,
- scratch4,
- FieldOperand(right, String::kLengthOffset),
- NULL);
- // Register scratch4 now holds left.length - right.length.
- const Register length_difference = scratch4;
- Label left_shorter;
- __ j(less, &left_shorter);
- // The right string isn't longer than the left one.
- // Get the right string's length by subtracting the (non-negative) difference
- // from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference, NULL);
- __ bind(&left_shorter);
- // Register scratch1 now holds Min(left.length, right.length).
- const Register min_length = scratch1;
-
- Label compare_lengths;
- // If min-length is zero, go directly to comparing lengths.
- __ SmiTest(min_length);
- __ j(zero, &compare_lengths);
-
- __ SmiToInteger32(min_length, min_length);
-
- // Registers scratch2 and scratch3 are free.
- Label result_not_equal;
- Label loop;
- {
- // Check characters 0 .. min_length - 1 in a loop.
- // Use scratch3 as loop index, min_length as limit and scratch2
- // for computation.
- const Register index = scratch3;
- __ movl(index, Immediate(0)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- // TODO(lrn): Could we load more than one character at a time?
- __ movb(scratch2, FieldOperand(left,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- // Increment index and use -1 modifier on next load to give
- // the previous load extra time to complete.
- __ addl(index, Immediate(1));
- __ cmpb(scratch2, FieldOperand(right,
- index,
- times_1,
- SeqAsciiString::kHeaderSize - 1));
- __ j(not_equal, &result_not_equal);
- __ cmpl(index, min_length);
- __ j(not_equal, &loop);
+ frame_->Push(left);
+ frame_->Push(right);
+ return frame_->CallStub(stub, 2);
}
- // Completed loop without finding different characters.
- // Compare lengths (precomputed).
- __ bind(&compare_lengths);
- __ SmiTest(length_difference);
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- Label result_greater;
- __ bind(&result_not_equal);
- // Unequal comparison of left to right, either character or length.
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Move(rax, Smi::FromInt(LESS));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(rax, Smi::FromInt(GREATER));
- __ ret(0);
-}
-
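Stripped of register assignments, the comparison reads as follows (a sketch; -1/0/1 stand in for the LESS/EQUAL/GREATER smis returned in rax):

    #include <cstddef>
    #include <cstdint>

    int CompareFlatAsciiStrings(const uint8_t* left, size_t left_len,
                                const uint8_t* right, size_t right_len) {
      size_t min_length = left_len < right_len ? left_len : right_len;
      for (size_t i = 0; i < min_length; i++) {  // walk the common prefix
        if (left[i] != right[i]) {
          return left[i] < right[i] ? -1 : 1;    // first difference decides
        }
      }
      if (left_len == right_len) return 0;       // identical strings
      return left_len < right_len ? -1 : 1;      // length difference decides
    }
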
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
-
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
-
- // Check for identity.
- Label not_same;
- __ cmpq(rdx, rax);
- __ j(not_equal, &not_same);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
-
- // Inline comparison of ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
- // Drop arguments from the stack
- __ pop(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
- GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
#undef __
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 14f690eb81..07bdadf9a1 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -492,6 +492,11 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+ // Generate a stub call from the virtual frame.
+ Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right);
+
// Emits code sequence that jumps to a JumpTarget if the inputs
// are both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
@@ -586,9 +591,7 @@ class CodeGenerator: public AstVisitor {
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
- static bool PatchInlineRuntimeEntry(Handle<String> name,
- const InlineRuntimeLUT& new_entry,
- InlineRuntimeLUT* old_entry);
+
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -659,6 +662,8 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+ void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
@@ -681,6 +686,9 @@ class CodeGenerator: public AstVisitor {
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -750,357 +758,6 @@ class CodeGenerator: public AstVisitor {
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type = TypeInfo::Unknown())
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(type_info),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- Result GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
- class ArgsReversedBits: public BitField<bool, 10, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
- class StaticTypeInfoBits: public BitField<int, 12, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 17 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool ArgsInRegistersSupported() {
- return (op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV);
- }
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-};
-
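MinorKey packs the stub parameters with V8's BitField template. A minimal sketch of that mechanism, assuming the usual BitField<type, shift, size> semantics:

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
    };

    // Mirrors the layout above: mode in bits 0-1, op in bits 2-8, etc.
    typedef BitField<int, 0, 2> ModeBits;
    typedef BitField<int, 2, 7> OpBits;

    uint32_t EncodeKey(int op, int mode) {
      return OpBits::encode(op) | ModeBits::encode(mode);
    }

Round-tripping through decode is how the second GenericBinaryOpStub constructor rebuilds a stub from a saved key.
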
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {}
-
- // Compares two flat ascii strings and returns the result in rax after
- // popping the two arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache, the generated code falls
- // through with the result in the result register. The object and the result
- // register can be the same. If the number is not found in the cache, the
- // code jumps to the label not_found with the content of register object
- // unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register addr, Register scratch)
- : object_(object), addr_(addr), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register addr_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
- object_.code(), addr_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits. 4 bits for each of the three
- // registers (object, address and scratch) OOOOAAAASSSS.
- class ScratchBits : public BitField<uint32_t, 0, 4> {};
- class AddressBits : public BitField<uint32_t, 4, 4> {};
- class ObjectBits : public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- AddressBits::encode(addr_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index d5b7e7768c..2c1056f579 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -47,22 +47,35 @@ bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs,
+ RegList object_regs,
+ RegList non_object_regs,
bool convert_call_to_jmp) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
// Enter an internal frame.
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- __ PushRegistersFromMemory(pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as two smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ ASSERT(!reg.is(kScratchRegister));
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ // Store the 64-bit value as two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ movq(kScratchRegister, reg);
+ __ Integer32ToSmi(reg, reg);
+ __ push(reg);
+ __ sar(kScratchRegister, Immediate(32));
+ __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+ __ push(kScratchRegister);
+ }
+ }
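The push sequence above splits each raw 64-bit register into two 32-bit halves and tags each half as a smi, so the GC walks over them without touching them. Assuming, as the sar/shl by 32 do, that an x64 smi of this era keeps its payload in the upper half of the word, the round trip looks like this:

    #include <cassert>
    #include <cstdint>

    uint64_t ToSmi(uint32_t value) { return static_cast<uint64_t>(value) << 32; }
    uint32_t FromSmi(uint64_t smi) { return static_cast<uint32_t>(smi >> 32); }

    int main() {
      uint64_t reg = 0xDEADBEEFCAFEF00Dull;  // arbitrary raw register value
      // Save: tag and push the low half first, then the high half.
      uint64_t lo_smi = ToSmi(static_cast<uint32_t>(reg));
      uint64_t hi_smi = ToSmi(static_cast<uint32_t>(reg >> 32));
      // Restore: shift the high half back up and OR in the low half.
      uint64_t restored =
          (static_cast<uint64_t>(FromSmi(hi_smi)) << 32) | FromSmi(lo_smi);
      assert(restored == reg);
      return 0;
    }

The stub uses an arithmetic shift where this sketch uses logical ones; since tagging keeps only 32 bits of each half, the reconstruction is exact either way.
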
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -70,12 +83,29 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ xor_(rax, rax); // No arguments (argc == 0).
__ movq(rbx, ExternalReference::debug_break());
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
- // Restore the register values containing object pointers from the expression
- // stack in the reverse order as they where pushed.
- __ PopRegistersToMemory(pointer_regs);
+ // Restore the register values from the expression stack.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, kDebugZapValue);
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ // Reconstruct the 64-bit value from two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ shl(kScratchRegister, Immediate(32));
+ __ pop(reg);
+ __ SmiToInteger32(reg, reg);
+ __ or_(reg, kScratchRegister);
+ }
+ }
// Get rid of the internal frame.
__ LeaveInternalFrame();
@@ -83,12 +113,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ pop(rax);
+ __ addq(rsp, Immediate(kPointerSize));
}
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -100,12 +127,11 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC call call (from ic-x64.cc)
+ // Register state for IC call (from ic-x64.cc)
// ----------- S t a t e -------------
- // -- rax: number of arguments
+ // -- rcx: function name
// -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
}
@@ -117,7 +143,7 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
// -- rax: number of arguments
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, rdi.bit(), rax.bit(), false);
}
@@ -127,7 +153,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// -- rax : key
// -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false);
}
@@ -138,7 +164,8 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- rcx : key
// -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
}
@@ -148,7 +175,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -- rax : receiver
// -- rcx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false);
}
@@ -157,7 +184,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: return value
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), true);
+ Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
}
@@ -168,7 +195,8 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -- rcx : name
// -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
}
@@ -177,7 +205,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, 0, 0, false);
}
@@ -197,7 +225,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
- Generate_DebugBreakCallHelper(masm, 0, true);
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 85ebc9586b..fd26535155 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -35,19 +35,6 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
@@ -58,55 +45,10 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- // Determine frame type.
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-byte* InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-byte* JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 725cbb0c58..ccd0392a30 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -229,6 +230,13 @@ void FullCodeGenerator::EmitReturnSequence() {
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ return kNoConstants;
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -253,20 +261,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
if (!reg.is(result_register())) __ movq(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -295,20 +290,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
case Expression::kTest:
Move(result_register(), slot);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(result_register(), slot);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -334,20 +316,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
case Expression::kTest:
__ Move(result_register(), lit->handle());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ Move(result_register(), lit->handle());
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -374,20 +343,7 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) {
case Expression::kTest:
__ pop(result_register());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- __ movq(result_register(), Operand(rsp, 0));
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -422,56 +378,7 @@ void FullCodeGenerator::DropAndApply(int count,
case Expression::kTest:
__ Drop(count);
if (!reg.is(result_register())) __ movq(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ movq(result_register(), reg);
- __ movq(Operand(rsp, 0), result_register());
- break;
- }
- DoTest(context);
- break;
- }
-}
-
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -512,32 +419,6 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ Move(result_register(), Factory::true_value());
- break;
- case kStack:
- __ Push(Factory::true_value());
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ Move(result_register(), Factory::false_value());
- break;
- case kStack:
- __ Push(Factory::false_value());
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -565,78 +446,19 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
break;
}
case Expression::kTest:
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) __ PushRoot(Heap::kFalseValueRootIndex);
- break;
- }
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) __ PushRoot(Heap::kTrueValueRootIndex);
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
- __ jmp(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is in the accumulator. If the value might be needed
- // on the stack (value/test and test/value contexts with a stack location
- // desired), then the value is already duplicated on the stack.
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
- // In value/test and test/value expression contexts with stack as the
- // desired location, there is already an extra value on the stack. Use a
- // label to discard it if unneeded.
- Label discard;
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_false = &discard;
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_true = &discard;
- break;
- }
- break;
- }
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Emit the inlined tests assumed by the stub.
__ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
__ j(equal, if_false);
@@ -650,83 +472,28 @@ void FullCodeGenerator::DoTest(Expression::Context context) {
Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, if_true);
- // Save a copy of the value if it may be needed and isn't already saved.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- }
-
// Call the ToBoolean stub for all other cases.
ToBooleanStub stub;
__ push(result_register());
__ CallStub(&stub);
__ testq(rax, rax);
- // The stub returns nonzero for true. Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
-
- case Expression::kTest:
- __ j(not_zero, true_label_);
- __ jmp(false_label_);
- break;
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ j(zero, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ j(not_zero, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ j(not_zero, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ j(zero, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
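Split is what makes the fall-through plumbing pay off: at most one branch is emitted whenever one of the two targets is the fall-through. A toy mirror of the decision, printing mnemonics instead of emitting them (the negated-condition spelling is simplified):

    #include <cstdio>

    void Split(const char* cc, const char* if_true, const char* if_false,
               const char* fall_through) {
      if (if_false == fall_through) {
        std::printf("j%s %s\n", cc, if_true);    // single branch
      } else if (if_true == fall_through) {
        std::printf("jn%s %s\n", cc, if_false);  // NegateCondition(cc)
      } else {
        std::printf("j%s %s\n", cc, if_true);    // branch plus unconditional jump
        std::printf("jmp %s\n", if_false);
      }
    }
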
@@ -912,17 +679,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
- __ JumpIfNotBothSmi(rdx, rax, &slow_case);
- __ SmiCompare(rdx, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ // Perform the comparison as if via '==='.
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ JumpIfNotBothSmi(rdx, rax, &slow_case);
+ __ SmiCompare(rdx, rax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(equal, true);
__ CallStub(&stub);
__ testq(rax, rax);
@@ -1206,7 +974,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized);
@@ -1330,12 +1098,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_elements());
- if (expr->depth() > 1) {
+ if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ } else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
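The copy-on-write branch above lets the stub hand back an array literal that initially shares the constant elements array; a real copy only happens when someone writes to the clone. Illustrative semantics only, not V8's implementation:

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct CowArray {
      std::shared_ptr<std::vector<int> > elements;  // shared until written

      explicit CowArray(std::shared_ptr<std::vector<int> > e) : elements(e) {}

      void Set(size_t i, int value) {
        if (elements.use_count() > 1) {             // still shared: copy first
          elements.reset(new std::vector<int>(*elements));
        }
        (*elements)[i] = value;
      }
    };
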
@@ -1389,10 +1163,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1403,57 +1178,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(rax); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1494,13 +1282,85 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ ASSERT(constant == kNoConstants); // Only handled case.
+
+ // Do combined smi check of the operands. Left operand is on the
+ // stack (popped into rdx). Right operand is in rax but moved into
+ // rcx to make the shifts easier.
+ Label done, stub_call, smi_case;
+ __ pop(rdx);
+ __ movq(rcx, rax);
+ Condition smi = __ CheckBothSmi(rdx, rax);
+ __ j(smi, &smi_case);
+
+ __ bind(&stub_call);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (stub.ArgsInRegistersSupported()) {
+ stub.GenerateCall(masm_, rdx, rcx);
+ } else {
+ __ push(rdx);
+ __ push(rcx);
+ __ CallStub(&stub);
+ }
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(rax, rdx, rcx);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(rax, rdx, rcx);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::ADD:
+ __ SmiAdd(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::SUB:
+ __ SmiSub(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::MUL:
+ __ SmiMul(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::BIT_OR:
+ __ SmiOr(rax, rdx, rcx);
+ break;
+ case Token::BIT_AND:
+ __ SmiAnd(rax, rdx, rcx);
+ break;
+ case Token::BIT_XOR:
+ __ SmiXor(rax, rdx, rcx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ __ bind(&done);
+ Apply(context, rax);
+}
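The inlined fast path above follows the standard smi pattern: check both operands, perform the operation inline, and bail out to the generic stub on a non-smi operand or an overflow. A toy version for Token::ADD, assuming tagged values whose low tag bit is zero (kSmiTag == 0), so tagged addition is plain addition:

    #include <cstdint>

    bool IsSmi(int64_t value) { return (value & 1) == 0; }  // kSmiTagMask check

    int64_t GenericAddStub(int64_t a, int64_t b) {
      // Stand-in for the GenericBinaryOpStub call (the heap-number and
      // string paths); elided in this sketch.
      return 0;
    }

    int64_t FastAdd(int64_t a, int64_t b) {
      int64_t result;
      if (IsSmi(a) && IsSmi(b) &&
          !__builtin_add_overflow(a, b, &result)) {  // gcc/clang builtin
        return result;                               // the smi_case path
      }
      return GenericAddStub(a, b);                   // the stub_call path
    }
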
+
+
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- __ push(result_register());
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
+ Expression::Context context,
+ OverwriteMode mode) {
+ GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
+ if (stub.ArgsInRegistersSupported()) {
+ __ pop(rdx);
+ stub.GenerateCall(masm_, rdx, rax);
+ } else {
+ __ push(result_register());
+ __ CallStub(&stub);
+ }
Apply(context, rax);
}
@@ -1923,11 +1783,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
- VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForValue(expr->expression(), kStack);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -1940,16 +1800,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into rdi and rax.
+ // Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
- // Function is in rsp[arg_count + 1].
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in rax, or pop it.
- DropAndApply(1, context_, rax);
+ Apply(context_, rax);
}
@@ -1961,7 +1818,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
@@ -1978,11 +1837,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
Condition positive_smi = __ CheckPositiveSmi(rax);
- __ j(positive_smi, if_true);
- __ jmp(if_false);
+ Split(positive_smi, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1996,7 +1856,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -2010,8 +1872,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, if_false);
__ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
- __ j(below_equal, if_true);
- __ jmp(if_false);
+ Split(below_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2025,12 +1886,13 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(above_equal, if_true);
- __ jmp(if_false);
+ Split(above_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2044,14 +1906,15 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
- __ jmp(if_false);
+ Split(not_zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2066,7 +1929,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -2084,12 +1949,13 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2103,12 +1969,13 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2122,12 +1989,13 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2140,7 +2008,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2156,8 +2026,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2173,12 +2042,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(rbx);
__ cmpq(rax, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2187,8 +2057,8 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
VisitForValue(args->at(0), kAccumulator);
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
@@ -2392,7 +2262,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
VisitForValue(args->at(0), kStack); // Load the object.
VisitForValue(args->at(1), kAccumulator); // Load the value.
- __ pop(rbx); // rax = value. ebx = object.
+ __ pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -2727,6 +2597,40 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
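+  // The masked bits of the hash field are zero exactly when the string
+  // contains a cached array index.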
+ __ testl(FieldOperand(rax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
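+  // Load the hash field and extract the cached array index from it as a
+  // smi.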
+ __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ IndexFromHash(rax, rax);
+
+ Apply(context_, rax);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2826,19 +2730,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
- break;
- case kStack:
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2850,42 +2742,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
-
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ Move(rcx, proxy->name());
- __ movq(rax, CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ push(rax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, rax);
break;
@@ -2906,9 +2774,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2922,27 +2788,24 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register rax.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register rax.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
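+        // For a smi operand, perform the bitwise NOT inline and jump over
+        // the stub call.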
+ Label call_stub;
+ __ JumpIfNotSmi(rax, &call_stub);
+ __ SmiNot(rax, rax);
+ __ jmp(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ jmp(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ SmiNot(result_register(), result_register());
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, rax);
break;
}
@@ -2954,6 +2817,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
// Invalid left-hand-sides are rewritten to have a 'throw
// ReferenceError' as the left-hand side.
@@ -3019,8 +2883,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -3041,7 +2903,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
} else {
@@ -3124,83 +2986,144 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmpq(obj, null_const);
- if (strict) {
- __ j(equal, if_true);
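+// Loads the value of an expression for typeof, using loads that cannot
+// throw a reference error for unresolved global variables or lookup slots.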
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ Move(rcx, proxy->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(rax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(rsi);
+ __ Push(proxy->name());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(rax);
} else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
+ }
+}
+
+
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ if (check->Equals(Heap::number_symbol())) {
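+    // Both smis and heap numbers answer 'number'.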
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_true);
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ // Check for undetectable objects => false.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ CmpInstanceType(rdx, FIRST_NONSTRING_TYPE);
+ Split(below, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
- __ CompareRoot(obj, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(rax, Heap::kFalseValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- __ JumpIfSmi(obj, if_false);
- // It can be an undetectable object.
- __ movq(scratch, FieldOperand(obj, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ // Check for undetectable objects => true.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+ __ j(equal, if_true);
+ // Regular expressions => 'function' (they are callable).
+ __ CmpInstanceType(rdx, JS_REGEXP_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, rdx);
+ __ j(equal, if_false);
+ // Check for undetectable objects => false.
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
+ __ j(not_zero, if_false);
+ // Check for JS objects => true.
+ __ CmpInstanceType(rdx, FIRST_JS_OBJECT_TYPE);
+ __ j(below, if_false);
+ __ CmpInstanceType(rdx, LAST_JS_OBJECT_TYPE);
+ Split(below_equal, if_true, if_false, fall_through);
+ } else {
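+    // The string does not match any typeof result, so the comparison is
+    // always false.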
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
- switch (expr->op()) {
+ switch (op) {
case Token::IN:
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
@@ -3208,8 +3131,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
break;
}
@@ -3217,28 +3140,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through.
- case Token::EQ: {
+ case Token::EQ:
cc = equal;
__ pop(rdx);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, rdx, rax, if_true, if_false, rcx);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, rax, rdx, if_true, if_false, rcx);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = less;
__ pop(rdx);
@@ -3265,20 +3174,18 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ JumpIfNotBothSmi(rax, rdx, &slow_case);
- __ SmiCompare(rdx, rax);
- __ j(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
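+        // Handle the smi vs. smi case inline before falling back to the
+        // comparison stub.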
+ Label slow_case;
+ __ JumpIfNotBothSmi(rax, rdx, &slow_case);
+ __ SmiCompare(rdx, rax);
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(cc, if_true);
- __ jmp(if_false);
+ Split(cc, if_true, if_false, fall_through);
}
}
@@ -3288,6 +3195,35 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ if (expr->is_strict()) {
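+    // With strict equality, only null itself compares equal to null.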
+ Split(equal, if_true, if_false, fall_through);
+ } else {
+ __ j(equal, if_true);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, if_true);
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ // It can be an undetectable object.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, rax);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index a8971f5b4a..a74e621e15 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -487,6 +487,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, the elements map check is skipped.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -515,10 +516,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// scratch - used to hold elements of the receiver and the loaded value.
__ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
// Check that the key (index) is within bounds.
__ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
// Unsigned comparison rejects negative indices.
@@ -567,31 +572,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- __ and_(hash, Immediate(String::kArrayIndexValueMask));
- __ shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ Integer32ToSmi(key, hash);
-}
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -611,13 +591,19 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
+  // Check the "has fast elements" bit in the receiver's map, which is
+  // now in rcx.
+ __ testb(FieldOperand(rcx, Map::kBitField2Offset),
+ Immediate(1 << Map::kHasFastElements));
+ __ j(zero, &check_pixel_array);
+
GenerateFastArrayLoad(masm,
rdx,
rax,
rcx,
rbx,
rax,
- &check_pixel_array,
+ NULL,
&slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
@@ -626,7 +612,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements object is a pixel array.
// rdx: receiver
// rax: key
- // rcx: elements array
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rbx, rax); // Used on both directions of next branch.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
@@ -732,7 +718,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&index_string);
- GenerateIndexFromHash(masm, rax, rbx);
+ __ IndexFromHash(rbx, rax);
__ jmp(&index_smi);
}
@@ -1012,7 +998,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rdx: JSObject
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
+ // Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
@@ -1075,8 +1061,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode; if it is the
- // length is always a smi.
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
__ bind(&array);
// rax: value
// rdx: receiver (a JSArray)
@@ -1588,7 +1574,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, rcx, rbx);
+ __ IndexFromHash(rbx, rcx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1862,6 +1848,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ j(not_equal, &miss);
// Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
__ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index c1954a898a..165c51dd27 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -262,6 +262,21 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (FLAG_debug_code) {
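+    // Both the plain FixedArray map and its copy-on-write variant count
+    // as fast elements.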
+ Label ok;
+ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ j(equal, &ok);
+ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ j(equal, &ok);
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L);
@@ -376,6 +391,25 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+  // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in the index register. Even if we
+  // subsequently go to the slow case, converting the key to a smi is
+  // always valid.
+  // hash: the key's hash field, including its array index value.
+ and_(hash, Immediate(String::kArrayIndexValueMask));
+ shr(hash, Immediate(String::kHashShift));
+  // Here we actually clobber the key, which will be used if we call into
+  // the runtime later. However, as the new key is the numeric value of a
+  // string key, there is no difference in using either key.
+ Integer32ToSmi(index, hash);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
@@ -566,28 +600,21 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
-
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
// Load the builtins object into target register.
movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movq(target, FieldOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
- // Load the JavaScript builtin function from the builtins object.
- movq(rdi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
- // Load the code entry point from the builtins object.
- movq(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
- if (FLAG_debug_code) {
- // Make sure the code objects in the builtins object and in the
- // builtin function are the same.
- push(target);
- movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
- cmpq(target, Operand(rsp, 0));
- Assert(equal, "Builtin code object changed");
- pop(target);
- }
- lea(target, FieldOperand(target, Code::kHeaderSize));
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(rdi));
+ // Load the JavaScript builtin function from the builtins object.
+ GetBuiltinFunction(rdi, id);
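+  // The code entry field points directly at the first instruction, so no
+  // Code header adjustment is needed.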
+ movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -2094,91 +2121,8 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-void MacroAssembler::PushRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Push the content of the memory location to the stack.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- push(Operand(kScratchRegister, 0));
- }
- }
-}
-
-
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- movq(Operand(kScratchRegister, 0), reg);
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- movq(reg, Operand(kScratchRegister, 0));
- }
- }
-}
-
-
-void MacroAssembler::PopRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Pop the content from the stack to the memory location.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- pop(Operand(kScratchRegister, 0));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT(!scratch.is(kScratchRegister));
- ASSERT(!base.is(kScratchRegister));
- ASSERT(!base.is(scratch));
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- movq(scratch, Operand(base, 0));
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- movq(Operand(kScratchRegister, 0), scratch);
- lea(base, Operand(base, kPointerSize));
- }
- }
-}
+#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
xor_(rax, rax); // no arguments
@@ -2296,10 +2240,9 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
InvokeCode(rdx, expected, actual, flag);
@@ -2349,8 +2292,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
- bool save_rax) {
+void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Setup the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
@@ -2359,7 +2301,7 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
push(rbp);
movq(rbp, rsp);
- // Reserve room for entry stack pointer and push the debug marker.
+ // Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
@@ -2378,23 +2320,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
store_rax(context_address);
}
-void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode,
- int result_size,
+void MacroAssembler::EnterExitFrameEpilogue(int result_size,
int argc) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // TODO(1243899): This should be symmetric to
- // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
- // correct here, but computed for the other call. Very error
- // prone! FIX THIS. Actually there are deeper problems with
- // register saving than this asymmetry (see the bug report
- // associated with this issue).
- PushRegistersFromMemory(kJSCallerSaved);
- }
-#endif
-
#ifdef _WIN64
// Reserve space on stack for result and argument structures, if necessary.
int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
@@ -2423,48 +2350,35 @@ void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode,
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
- EnterExitFramePrologue(mode, true);
+void MacroAssembler::EnterExitFrame(int result_size) {
+ EnterExitFramePrologue(true);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, r14, times_pointer_size, offset));
- EnterExitFrameEpilogue(mode, result_size, 2);
+ EnterExitFrameEpilogue(result_size, 2);
}
-void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+void MacroAssembler::EnterApiExitFrame(int stack_space,
int argc,
int result_size) {
- EnterExitFramePrologue(mode, false);
+ EnterExitFramePrologue(false);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, (stack_space * kPointerSize) + offset));
- EnterExitFrameEpilogue(mode, result_size, argc);
+ EnterExitFrameEpilogue(result_size, argc);
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
+void MacroAssembler::LeaveExitFrame(int result_size) {
// Registers:
// r12 : argv
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // It's okay to clobber register rbx below because we don't need
- // the function pointer after this.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- lea(rbx, Operand(rbp, kOffset));
- CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
- }
-#endif
// Get the return address from the stack and restore the frame pointer.
movq(rcx, Operand(rbp, 1 * kPointerSize));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 7083224bdf..9f5a746581 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -132,13 +132,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void PushRegistersFromMemory(RegList regs);
- void PopRegistersToMemory(RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -161,17 +154,16 @@ class MacroAssembler: public Assembler {
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
// to the first argument in register rsi.
- void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
+ void EnterExitFrame(int result_size = 1);
- void EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+ void EnterApiExitFrame(int stack_space,
int argc,
int result_size = 1);
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
+ void LeaveExitFrame(int result_size = 1);
// ---------------------------------------------------------------------------
@@ -203,6 +195,9 @@ class MacroAssembler: public Assembler {
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
@@ -720,6 +715,12 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+  //  hash - holds the key's hash field. Clobbered.
+  //  index - holds the array index as a smi on exit.
+ void IndexFromHash(Register hash, Register index);
+
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -832,6 +833,8 @@ class MacroAssembler: public Assembler {
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertFastElements(Register elements);
+
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
@@ -873,8 +876,8 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
- void EnterExitFramePrologue(ExitFrame::Mode mode, bool save_rax);
- void EnterExitFrameEpilogue(ExitFrame::Mode mode, int result_size, int argc);
+ void EnterExitFramePrologue(bool save_rax);
+ void EnterExitFrameEpilogue(int result_size, int argc);
// Allocation support helpers.
// Loads the top of new-space into the result register.
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 80318648ea..91e2b449e0 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -32,11 +32,9 @@
#include "serialize.h"
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "x64/macro-assembler-x64.h"
#include "x64/regexp-macro-assembler-x64.h"
namespace v8 {
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 7aaeab793d..f500ce647e 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -31,9 +31,10 @@
#if defined(V8_TARGET_ARCH_X64)
#include "ic-inl.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "stub-cache.h"
-#include "macro-assembler-x64.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -1084,16 +1085,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
+ Label call_builtin;
+
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
- // Check that the elements are in fast mode (not dictionary).
+ // Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
Factory::fixed_array_map());
- __ j(not_equal, &miss);
+ __ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1164,7 +1167,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Push the argument...
__ movq(Operand(rdx, 0), rcx);
// ... and fill the rest with holes.
- __ Move(kScratchRegister, Factory::the_hole_value());
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
}
@@ -1175,15 +1178,16 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
+
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
}
+ __ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
@@ -1204,11 +1208,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
String* name,
CheckType check) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- ...
- // -- esp[(argc + 1) * 4] : receiver
+ // -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
ASSERT(check == RECEIVER_MAP_CHECK);
@@ -1235,9 +1239,10 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
- // Check that the elements are in fast mode (not dictionary).
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &miss);
+ // Check that the elements are in fast mode and writable.
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &call_builtin);
// Get the array's length into rcx and calculate new length.
__ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1245,7 +1250,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ j(negative, &return_undefined);
// Get the last element.
- __ Move(r9, Factory::the_hole_value());
+ __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
__ movq(rax, FieldOperand(rbx,
rcx, times_pointer_size,
FixedArray::kHeaderSize));
@@ -1265,14 +1270,14 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
-
- __ Move(rax, Factory::undefined_value());
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
+
__ bind(&miss);
Object* obj = GenerateMissBranch();
if (obj->IsFailure()) return obj;
@@ -1287,8 +1292,69 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rax);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+ rbx, rdx, rdi, name, &miss);
+
+ Register receiver = rax;
+ Register index = rdi;
+ Register scratch1 = rbx;
+ Register scratch2 = rdx;
+ Register result = rax;
+ __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
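+  // If no index argument was passed, use undefined as the index, matching
+  // str.charAt() called with no arguments.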
+ if (argc > 0) {
+ __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
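+  // charAt returns the empty string for an out-of-range index.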
+ __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
}
@@ -1297,10 +1363,67 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
-}
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rax);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+ rbx, rdx, rdi, name, &miss);
+
+ Register receiver = rbx;
+ Register index = rdi;
+ Register scratch = rdx;
+ Register result = rax;
+ __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
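+  // charCodeAt returns NaN for an out-of-range index.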
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
+}
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index b8b008c7a6..88e7cc8811 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -1230,9 +1230,9 @@ Result VirtualFrame::CallConstructor(int arg_count) {
// and receiver on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
- PushElementAt(arg_count + 1);
+ PushElementAt(arg_count);
Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
function.ToRegister(rdi);
// Constructors are called with the number of arguments in register
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 7689371a08..d03f5f7a00 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -35,6 +35,11 @@ test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
# BUG(382): Weird test. Can't guarantee that it never times out.
test-api/ApplyInterruption: PASS || TIMEOUT
+# BUG(484): This test, which we thought was originally corrected in r5236,
+# is reappearing. Until the bug in the test is fixed, it is marked
+# PASS || FAIL, since it only fails when the snapshot is on.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
test-serialize/TestThatAlwaysFails: FAIL
@@ -42,9 +47,6 @@ test-serialize/DependentTestThatAlwaysFails: FAIL
[ $arch == arm ]
-# BUG(240): Test seems flaky on ARM.
-test-api/RegExpInterruption: SKIP
-
# We cannot assume that we can throw OutOfMemory exceptions in all situations.
# Apparently our ARM box is in such a state. Skip the test as it also runs for
# a long time.
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 8bfa51c60a..2b50db7ecd 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -37,6 +37,7 @@
#include "top.h"
#include "utils.h"
#include "cctest.h"
+#include "parser.h"
static const bool kLogThreading = true;
@@ -3605,6 +3606,29 @@ THREADED_TEST(ExceptionExtensions) {
}
+static const char* kNativeCallInExtensionSource =
+ "function call_runtime_last_index_of(x) {"
+ " return %StringLastIndexOf(x, 'bob', 10);"
+ "}";
+
+
+static const char* kNativeCallTest =
+ "call_runtime_last_index_of('bobbobboellebobboellebobbob');";
+
+// Test that native runtime calls are supported in extensions.
+THREADED_TEST(NativeCallInExtensions) {
+ v8::HandleScope handle_scope;
+ v8::RegisterExtension(new Extension("nativecall",
+ kNativeCallInExtensionSource));
+ const char* extension_names[] = { "nativecall" };
+ v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::Handle<Context> context = Context::New(&extensions);
+ Context::Scope lock(context);
+ v8::Handle<Value> result = Script::Compile(v8_str(kNativeCallTest))->Run();
+ CHECK_EQ(result, v8::Integer::New(3));
+}
+
+
static void CheckDependencies(const char* name, const char* expected) {
v8::HandleScope handle_scope;
v8::ExtensionConfiguration config(1, &name);
@@ -8601,15 +8625,12 @@ TEST(PreCompileInvalidPreparseDataError) {
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK(!sd->HasError());
// ScriptDataImpl private implementation details
- const int kUnsignedSize = sizeof(unsigned);
- const int kHeaderSize = 4;
- const int kFunctionEntrySize = 4;
+ const int kHeaderSize = i::ScriptDataImpl::kHeaderSize;
+ const int kFunctionEntrySize = i::FunctionEntry::kSize;
const int kFunctionEntryStartOffset = 0;
const int kFunctionEntryEndOffset = 1;
unsigned* sd_data =
reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
- CHECK_EQ(sd->Length(),
- (kHeaderSize + 2 * kFunctionEntrySize) * kUnsignedSize);
// Overwrite function bar's end position with 0.
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryEndOffset] = 0;
@@ -8625,6 +8646,8 @@ TEST(PreCompileInvalidPreparseDataError) {
try_catch.Reset();
// Overwrite function bar's start position with 200. The function entry
// will not be found when searching for it by position.
+ sd = v8::ScriptData::PreCompile(script, i::StrLength(script));
+ sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
compiled_script = Script::New(source, NULL, sd);
@@ -9203,6 +9226,7 @@ class RegExpStringModificationTest {
morphs_ < kMaxModifications) {
int morphs_before = morphs_;
{
+ v8::HandleScope scope;
// Match 15-30 "a"'s against 14 and a "b".
const char* c_source =
"/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/"
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 9033f4b874..7c669d30b7 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -91,7 +91,7 @@ TEST(1) {
Label L, C;
__ mov(r1, Operand(r0));
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand(0, RelocInfo::NONE));
__ b(&C);
__ bind(&L);
@@ -99,7 +99,7 @@ TEST(1) {
__ sub(r1, r1, Operand(1));
__ bind(&C);
- __ teq(r1, Operand(0));
+ __ teq(r1, Operand(0, RelocInfo::NONE));
__ b(ne, &L);
__ mov(pc, Operand(lr));
@@ -135,7 +135,7 @@ TEST(2) {
__ sub(r1, r1, Operand(1));
__ bind(&C);
- __ teq(r1, Operand(0));
+ __ teq(r1, Operand(0, RelocInfo::NONE));
__ b(ne, &L);
__ mov(pc, Operand(lr));
@@ -226,11 +226,17 @@ TEST(4) {
double a;
double b;
double c;
+ double d;
+ double e;
+ double f;
+ int i;
+ float x;
+ float y;
} T;
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
- // the doubles t.a, t.b, and t.c.
+ // the doubles and floats.
Assembler assm(NULL, 0);
Label L, C;
@@ -252,6 +258,34 @@ TEST(4) {
__ vmov(d4, r2, r3);
__ vstr(d4, r4, OFFSET_OF(T, b));
+ // Load t.x and t.y, switch values, and store back to the struct.
+ __ vldr(s0, r4, OFFSET_OF(T, x));
+ __ vldr(s31, r4, OFFSET_OF(T, y));
+ __ vmov(s16, s0);
+ __ vmov(s0, s31);
+ __ vmov(s31, s16);
+ __ vstr(s0, r4, OFFSET_OF(T, x));
+ __ vstr(s31, r4, OFFSET_OF(T, y));
+
+ // Move a literal that can be encoded in the instruction into a register.
+ __ vmov(d4, 1.0);
+ __ vstr(d4, r4, OFFSET_OF(T, e));
+
+ // Move a literal that requires the full 64 bits to encode into a register.
+ // 0x3ff0000010000000 = 1.000000059604644775390625
+ __ vmov(d4, 1.000000059604644775390625);
+ __ vstr(d4, r4, OFFSET_OF(T, d));
+
+ // Convert from floating point to integer.
+ __ vmov(d4, 2.0);
+ __ vcvt_s32_f64(s31, d4);
+ __ vstr(s31, r4, OFFSET_OF(T, i));
+
+ // Convert from integer to floating point.
+ __ mov(lr, Operand(42));
+ __ vmov(s31, lr);
+ __ vcvt_f64_s32(d4, s31);
+ __ vstr(d4, r4, OFFSET_OF(T, f));
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
@@ -267,8 +301,20 @@ TEST(4) {
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.i = 0;
+ t.x = 4.5;
+ t.y = 9.0;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
+ CHECK_EQ(4.5, t.y);
+ CHECK_EQ(9.0, t.x);
+ CHECK_EQ(2, t.i);
+ CHECK_EQ(42.0, t.f);
+ CHECK_EQ(1.0, t.e);
+ CHECK_EQ(1.000000059604644775390625, t.d);
CHECK_EQ(4.25, t.c);
CHECK_EQ(4.25, t.b);
CHECK_EQ(1.5, t.a);
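
The two vmov immediates above straddle VFPv3's 8-bit immediate encoding: 1.0 survives it (only the sign, a narrow exponent range, and the top four fraction bits are representable), while 1.000000059604644775390625 (0x3ff0000010000000) sets a low fraction bit and has to be materialized differently. A rough encodability check based on our reading of the ARM VFPExpandImm rules; the helper name is ours, not V8's:

#include <cassert>
#include <cstdint>
#include <cstring>

// True if |d| can be encoded as a VFPv3 8-bit immediate (vmov.f64 dN, #imm).
// The 8 bits expand to a sign, an exponent in [0x3fc, 0x403], and the top
// four fraction bits, so the low 48 fraction bits must be zero.
static bool FitsVfpImmediate(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  if ((bits & 0x0000FFFFFFFFFFFFULL) != 0) return false;  // low fraction bits
  int exponent = static_cast<int>((bits >> 52) & 0x7FF);
  return exponent >= 0x3FC && exponent <= 0x403;
}

int main() {
  assert(FitsVfpImmediate(1.0));     // encoded directly in the instruction
  assert(FitsVfpImmediate(-13.0));   // the other literal the disasm test uses
  assert(!FitsVfpImmediate(1.000000059604644775390625));  // low bit set
  return 0;
}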
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 0a2310e3e8..955562b287 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -109,7 +109,7 @@ TEST(MIPS1) {
__ bind(&C);
__ xori(v1, a1, 0);
- __ Branch(ne, &L, v1, Operand(0));
+ __ Branch(ne, &L, v1, Operand(0, RelocInfo::NONE));
__ nop();
__ jr(ra);
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 9931f5607a..9c292bcfca 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -57,35 +57,6 @@ TEST(List) {
}
-TEST(RemoveLast) {
- List<int> list(4);
- CHECK_EQ(0, list.length());
- list.Add(1);
- CHECK_EQ(1, list.length());
- CHECK_EQ(1, list.last());
- list.RemoveLast();
- CHECK_EQ(0, list.length());
- list.Add(2);
- list.Add(3);
- CHECK_EQ(2, list.length());
- CHECK_EQ(3, list.last());
- list.RemoveLast();
- CHECK_EQ(1, list.length());
- CHECK_EQ(2, list.last());
- list.RemoveLast();
- CHECK_EQ(0, list.length());
-
- const int kElements = 100;
- for (int i = 0; i < kElements; i++) list.Add(i);
- for (int j = kElements - 1; j >= 0; j--) {
- CHECK_EQ(j + 1, list.length());
- CHECK_EQ(j, list.last());
- list.RemoveLast();
- CHECK_EQ(j, list.length());
- }
-}
-
-
TEST(DeleteEmpty) {
{
List<int>* list = new List<int>(0);
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 0455790e04..f5526cea98 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -869,8 +869,8 @@ static void DebugEventBreakPointCollectGarbage(
// Scavenge.
Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
} else {
- // Mark sweep (and perhaps compact).
- Heap::CollectAllGarbage(false);
+ // Mark sweep compact.
+ Heap::CollectAllGarbage(true);
}
}
}
@@ -1127,7 +1127,7 @@ TEST(BreakPointICCall) {
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
- // Run with breakpoint
+ // Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
@@ -1144,6 +1144,73 @@ TEST(BreakPointICCall) {
}
+// Test that a break point can be set at an IC call location and survive a GC.
+TEST(BreakPointICCallWithGC) {
+ break_point_hit_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage,
+ v8::Undefined());
+ v8::Script::Compile(v8::String::New("function bar(){return 1;}"))->Run();
+ v8::Script::Compile(v8::String::New("function foo(){return bar();}"))->Run();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+
+ // Run without breakpoints.
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ int bp = SetBreakPoint(foo, 0);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, break_point_hit_count);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+// Test that a break point can be set at a construct call location and
+// survive a GC.
+TEST(BreakPointConstructCallWithGC) {
+ break_point_hit_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage,
+ v8::Undefined());
+ v8::Script::Compile(v8::String::New("function bar(){ this.x = 1;}"))->Run();
+ v8::Script::Compile(v8::String::New(
+ "function foo(){return new bar(1).x;}"))->Run();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+
+ // Run without breakpoints.
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ int bp = SetBreakPoint(foo, 0);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, break_point_hit_count);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test that a break point can be set at a return store location.
TEST(BreakPointReturn) {
break_point_hit_count = 0;
@@ -2680,6 +2747,65 @@ TEST(DebugStepNamedLoadLoop) {
}
+static void DoDebugStepNamedStoreLoop(int expected, bool full_compiler = true) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug event listener which steps and counts before compiling the
+ // function to ensure the full compiler is used.
+ if (full_compiler) {
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+ }
+
+ // Create a function for testing stepping of named store.
+ v8::Local<v8::Function> foo = CompileFunction(
+ &env,
+ "function foo() {\n"
+ " var a = {a:1};\n"
+ " for (var i = 0; i < 10; i++) {\n"
+ " a.a = 2\n"
+ " }\n"
+ "}\n",
+ "foo");
+
+ // Call function without any break points to ensure inlining is in place.
+ foo->Call(env->Global(), 0, NULL);
+
+ // Register a debug event listener which steps and counts after compiling the
+ // function to ensure the optimizing compiler is used.
+ if (!full_compiler) {
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+ }
+
+ // Set up a break point and step through the function.
+ SetBreakPoint(foo, 3);
+ step_action = StepNext;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+
+ // With stepping all expected break locations are hit.
+ CHECK_EQ(expected, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+// Test of the stepping mechanism for named store in a loop.
+TEST(DebugStepNamedStoreLoopFull) {
+ // With the full compiler it is possible to break on the for statement.
+ DoDebugStepNamedStoreLoop(22);
+}
+
+
+// Test of the stepping mechanism for named store in a loop.
+TEST(DebugStepNamedStoreLoopOptimizing) {
+ // With the optimizing compiler it is not possible to break on the for
+ // statement, as it uses a local variable and thus involves no ICs.
+ DoDebugStepNamedStoreLoop(11, false);
+}
+
+
// Test the stepping mechanism with different ICs.
TEST(DebugStepLinearMixedICs) {
v8::HandleScope scope;
@@ -4465,6 +4591,18 @@ int GetTotalFramesInt(char *message) {
}
+// We match parts of the message to get the source line.
+int GetSourceLineFromBreakEventMessage(char *message) {
+ const char* source_line = "\"sourceLine\":";
+ char* pos = strstr(message, source_line);
+ if (pos == NULL) {
+ return -1;
+ }
+ return StringToInt(pos + strlen(source_line));
+}
+
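
For concreteness, the helper above scans the raw JSON text of a break event, so a fragment like the one below yields 12, and -1 when the key is absent. The message text is invented for illustration, and we assume StringToInt (a helper defined earlier in this file) parses the leading digits:

// Hypothetical usage inside this test file:
char msg[] = "{\"type\":\"event\",\"event\":\"break\",\"body\":{\"sourceLine\":12}}";
CHECK_EQ(12, GetSourceLineFromBreakEventMessage(msg));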
/* Test MessageQueues */
/* Tests the message queues that hold debugger commands and
* response messages to the debugger. Fills queues and makes
@@ -4744,6 +4882,9 @@ static void ThreadedMessageHandler(const v8::Debug::Message& message) {
v8::String::Value json(message.GetJSON());
Utf16ToAscii(*json, json.length(), print_buffer);
if (IsBreakEventMessage(print_buffer)) {
+ // Check that we are inside the while loop.
+ int source_line = GetSourceLineFromBreakEventMessage(print_buffer);
+ CHECK(8 <= source_line && source_line <= 13);
threaded_debugging_barriers.barrier_2.Wait();
}
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 0ba4f9ae47..61f5ffc727 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -422,6 +422,19 @@ TEST(Vfp) {
COMPARE(vmov(d3, d3, eq),
"0eb03b43 vmov.f64eq d3, d3");
+ COMPARE(vmov(s0, s31),
+ "eeb00a6f vmov.f32 s0, s31");
+ COMPARE(vmov(s31, s0),
+ "eef0fa40 vmov.f32 s31, s0");
+ COMPARE(vmov(r0, s0),
+ "ee100a10 vmov r0, s0");
+ COMPARE(vmov(r10, s31),
+ "ee1faa90 vmov r10, s31");
+ COMPARE(vmov(s0, r0),
+ "ee000a10 vmov s0, r0");
+ COMPARE(vmov(s31, r10),
+ "ee0faa90 vmov s31, r10");
+
COMPARE(vadd(d0, d1, d2),
"ee310b02 vadd.f64 d0, d1, d2");
COMPARE(vadd(d3, d4, d5, mi),
@@ -451,6 +464,41 @@ TEST(Vfp) {
"eeb70b00 vmov.f64 d0, #1");
COMPARE(vmov(d2, -13.0),
"eeba2b0a vmov.f64 d2, #-13");
+
+ COMPARE(vldr(s0, r0, 0),
+ "ed900a00 vldr s0, [r0 + 4*0]");
+ COMPARE(vldr(s1, r1, 4),
+ "edd10a01 vldr s1, [r1 + 4*1]");
+ COMPARE(vldr(s15, r4, 16),
+ "edd47a04 vldr s15, [r4 + 4*4]");
+ COMPARE(vldr(s16, r5, 20),
+ "ed958a05 vldr s16, [r5 + 4*5]");
+ COMPARE(vldr(s31, r10, 1020),
+ "eddafaff vldr s31, [r10 + 4*255]");
+
+ COMPARE(vstr(s0, r0, 0),
+ "ed800a00 vstr s0, [r0 + 4*0]");
+ COMPARE(vstr(s1, r1, 4),
+ "edc10a01 vstr s1, [r1 + 4*1]");
+ COMPARE(vstr(s15, r8, 8),
+ "edc87a02 vstr s15, [r8 + 4*2]");
+ COMPARE(vstr(s16, r9, 12),
+ "ed898a03 vstr s16, [r9 + 4*3]");
+ COMPARE(vstr(s31, r10, 1020),
+ "edcafaff vstr s31, [r10 + 4*255]");
+
+ COMPARE(vldr(d0, r0, 0),
+ "ed900b00 vldr d0, [r0 + 4*0]");
+ COMPARE(vldr(d1, r1, 4),
+ "ed911b01 vldr d1, [r1 + 4*1]");
+ COMPARE(vldr(d15, r10, 1020),
+ "ed9afbff vldr d15, [r10 + 4*255]");
+ COMPARE(vstr(d0, r0, 0),
+ "ed800b00 vstr d0, [r0 + 4*0]");
+ COMPARE(vstr(d1, r1, 4),
+ "ed811b01 vstr d1, [r1 + 4*1]");
+ COMPARE(vstr(d15, r10, 1020),
+ "ed8afbff vstr d15, [r10 + 4*255]");
}
VERIFY_RUN();
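
The expected disassembly above prints offsets as 4*imm because VLDR/VSTR encode an unsigned 8-bit count of words: the byte offset must be 4-aligned and at most 4 * 255 = 1020, which is exactly where the tests stop. A small sketch of that constraint, ignoring the instruction's add/subtract (U) bit; the function is illustrative, not a V8 API:

#include <cassert>

// True if |byte_offset| fits the VLDR/VSTR immediate field, which holds an
// unsigned 8-bit word count (offset = 4 * imm8).
static bool FitsVldrOffset(int byte_offset) {
  return byte_offset >= 0 &&
         (byte_offset & 3) == 0 &&
         (byte_offset >> 2) <= 255;
}

int main() {
  assert(FitsVldrOffset(1020));   // 4 * 255, the largest case tested above.
  assert(!FitsVldrOffset(1024));  // one word too far for the 8-bit field.
  assert(!FitsVldrOffset(2));     // not word-aligned.
  return 0;
}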
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 92ad0a4002..6dc49c0e54 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -372,6 +372,7 @@ TEST(RetainerProfile) {
i::HeapIterator iterator;
for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
ret_profile.CollectStats(obj);
+ ret_profile.CoarseAndAggregate();
RetainerProfilePrinter printer;
ret_profile.DebugPrintStats(&printer);
const char* retainers_of_a = printer.GetRetainers("A");
@@ -650,6 +651,8 @@ TEST(HeapSnapshotCodeObjects) {
CompileAndRunScript(
"function lazy(x) { return x - 1; }\n"
"function compiled(x) { return x + 1; }\n"
+ "var inferred = function(x) { return x; }\n"
+ "var anonymous = (function() { return function() { return 0; } })();\n"
"compiled(1)");
const v8::HeapSnapshot* snapshot =
v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
@@ -663,6 +666,18 @@ TEST(HeapSnapshotCodeObjects) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "lazy");
CHECK_NE(NULL, lazy);
CHECK_EQ(v8::HeapGraphNode::kClosure, lazy->GetType());
+ const v8::HeapGraphNode* inferred =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "inferred");
+ CHECK_NE(NULL, inferred);
+ CHECK_EQ(v8::HeapGraphNode::kClosure, inferred->GetType());
+ v8::String::AsciiValue inferred_name(inferred->GetName());
+ CHECK_EQ("inferred", *inferred_name);
+ const v8::HeapGraphNode* anonymous =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "anonymous");
+ CHECK_NE(NULL, anonymous);
+ CHECK_EQ(v8::HeapGraphNode::kClosure, anonymous->GetType());
+ v8::String::AsciiValue anonymous_name(anonymous->GetName());
+ CHECK_EQ("(anonymous function)", *anonymous_name);
// Find references to code.
const v8::HeapGraphNode* compiled_code =
@@ -864,4 +879,114 @@ TEST(Issue822) {
i::HeapSnapshotTester::CalculateNetworkSize(*jsobj);
}
+
+static const v8::HeapGraphNode* GetChild(
+ const v8::HeapGraphNode* node,
+ v8::HeapGraphNode::Type type,
+ const char* name,
+ const v8::HeapGraphNode* after = NULL) {
+ bool ignore_child = after != NULL;
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+ const v8::HeapGraphNode* child = prop->GetToNode();
+ v8::String::AsciiValue child_name(child->GetName());
+ if (!ignore_child
+ && child->GetType() == type
+ && strcmp(name, *child_name) == 0)
+ return child;
+ if (after != NULL && child == after) ignore_child = false;
+ }
+ return NULL;
+}
+
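Note the after parameter above: matching is suppressed until the scan has passed that child, so calling GetChild twice with the same type and name walks to the next occurrence. A hypothetical call pattern, mirroring how the test below finds the second "B" node:

// const v8::HeapGraphNode* first =
//     GetChild(root, v8::HeapGraphNode::kObject, "B");
// const v8::HeapGraphNode* second =
//     GetChild(root, v8::HeapGraphNode::kObject, "B", first);
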
+static bool IsNodeRetainedAs(const v8::HeapGraphNode* node,
+ int element) {
+ for (int i = 0, count = node->GetRetainersCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetRetainer(i);
+ if (prop->GetType() == v8::HeapGraphEdge::kElement
+ && element == prop->GetName()->Int32Value())
+ return true;
+ }
+ return false;
+}
+
+TEST(AggregatedHeapSnapshot) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileAndRunScript(
+ "function A() {}\n"
+ "function B(x) { this.x = x; }\n"
+ "var a = new A();\n"
+ "var b = new B(a);");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(
+ v8::String::New("agg"), v8::HeapSnapshot::kAggregated);
+ const v8::HeapGraphNode* strings = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kInternal,
+ "STRING_TYPE");
+ CHECK_NE(NULL, strings);
+ CHECK_NE(0, strings->GetSelfSize());
+ CHECK_NE(0, strings->GetInstancesCount());
+ const v8::HeapGraphNode* maps = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kInternal,
+ "MAP_TYPE");
+ CHECK_NE(NULL, maps);
+ CHECK_NE(0, maps->GetSelfSize());
+ CHECK_NE(0, maps->GetInstancesCount());
+
+ const v8::HeapGraphNode* a = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "A");
+ CHECK_NE(NULL, a);
+ CHECK_NE(0, a->GetSelfSize());
+ CHECK_EQ(1, a->GetInstancesCount());
+
+ const v8::HeapGraphNode* b = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "B");
+ CHECK_NE(NULL, b);
+ CHECK_NE(0, b->GetSelfSize());
+ CHECK_EQ(1, b->GetInstancesCount());
+
+ const v8::HeapGraphNode* glob_prop = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "(global property)",
+ b);
+ CHECK_NE(NULL, glob_prop);
+ CHECK_EQ(0, glob_prop->GetSelfSize());
+ CHECK_EQ(0, glob_prop->GetInstancesCount());
+ CHECK_NE(0, glob_prop->GetChildrenCount());
+
+ const v8::HeapGraphNode* a_from_glob_prop = GetChild(
+ glob_prop,
+ v8::HeapGraphNode::kObject,
+ "A");
+ CHECK_NE(NULL, a_from_glob_prop);
+ CHECK_EQ(0, a_from_glob_prop->GetSelfSize());
+ CHECK_EQ(0, a_from_glob_prop->GetInstancesCount());
+ CHECK_EQ(0, a_from_glob_prop->GetChildrenCount()); // Retains nothing.
+ CHECK(IsNodeRetainedAs(a_from_glob_prop, 1)); // (global property) has 1 ref.
+
+ const v8::HeapGraphNode* b_with_children = GetChild(
+ snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "B",
+ b);
+ CHECK_NE(NULL, b_with_children);
+ CHECK_EQ(0, b_with_children->GetSelfSize());
+ CHECK_EQ(0, b_with_children->GetInstancesCount());
+ CHECK_NE(0, b_with_children->GetChildrenCount());
+
+ const v8::HeapGraphNode* a_from_b = GetChild(
+ b_with_children,
+ v8::HeapGraphNode::kObject,
+ "A");
+ CHECK_NE(NULL, a_from_b);
+ CHECK_EQ(0, a_from_b->GetSelfSize());
+ CHECK_EQ(0, a_from_b->GetInstancesCount());
+ CHECK_EQ(0, a_from_b->GetChildrenCount()); // Retains nothing.
+ CHECK(IsNodeRetainedAs(a_from_b, 1)); // B has 1 ref to A.
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 2c286e3f0b..eec024f3fd 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -36,8 +36,8 @@ TEST(HeapMaps) {
InitializeVM();
CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
- CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
- CheckMap(Heap::string_map(), STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ CheckMap(Heap::string_map(), STRING_TYPE, kVariableSizeSentinel);
}
@@ -637,22 +637,27 @@ TEST(JSArray) {
// Allocate the object.
Handle<JSObject> object = Factory::NewJSObject(function);
Handle<JSArray> array = Handle<JSArray>::cast(object);
- array->Initialize(0);
+ Object* ok = array->Initialize(0);
+ // We just initialized the VM, no heap allocation failure yet.
+ CHECK(!ok->IsFailure());
// Set array length to 0.
- array->SetElementsLength(Smi::FromInt(0));
+ ok = array->SetElementsLength(Smi::FromInt(0));
+ CHECK(!ok->IsFailure());
CHECK_EQ(Smi::FromInt(0), array->length());
CHECK(array->HasFastElements()); // Must be in fast mode.
// array[length] = name.
- array->SetElement(0, *name);
+ ok = array->SetElement(0, *name);
+ CHECK(!ok->IsFailure());
CHECK_EQ(Smi::FromInt(1), array->length());
CHECK_EQ(array->GetElement(0), *name);
// Set array length with larger than smi value.
Handle<Object> length =
Factory::NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
- array->SetElementsLength(*length);
+ ok = array->SetElementsLength(*length);
+ CHECK(!ok->IsFailure());
uint32_t int_length = 0;
CHECK(length->ToArrayIndex(&int_length));
@@ -660,7 +665,8 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- array->SetElement(int_length, *name);
+ ok = array->SetElement(int_length, *name);
+ CHECK(!ok->IsFailure());
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -684,8 +690,11 @@ TEST(JSObjectCopy) {
obj->SetProperty(*first, Smi::FromInt(1), NONE);
obj->SetProperty(*second, Smi::FromInt(2), NONE);
- obj->SetElement(0, *first);
- obj->SetElement(1, *second);
+ Object* ok = obj->SetElement(0, *first);
+ CHECK(!ok->IsFailure());
+
+ ok = obj->SetElement(1, *second);
+ CHECK(!ok->IsFailure());
// Make the clone.
Handle<JSObject> clone = Copy(obj);
@@ -701,8 +710,10 @@ TEST(JSObjectCopy) {
clone->SetProperty(*first, Smi::FromInt(2), NONE);
clone->SetProperty(*second, Smi::FromInt(1), NONE);
- clone->SetElement(0, *second);
- clone->SetElement(1, *first);
+ ok = clone->SetElement(0, *second);
+ CHECK(!ok->IsFailure());
+ ok = clone->SetElement(1, *first);
+ CHECK(!ok->IsFailure());
CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
@@ -973,9 +984,10 @@ TEST(TestCodeFlushing) {
Heap::CollectAllGarbage(true);
Heap::CollectAllGarbage(true);
- // foo should still be in the compilation cache and therefore not
- // have been removed.
CHECK(function->shared()->is_compiled());
+
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
Heap::CollectAllGarbage(true);
Heap::CollectAllGarbage(true);
Heap::CollectAllGarbage(true);
@@ -983,7 +995,9 @@ TEST(TestCodeFlushing) {
// foo should no longer be in the compilation cache
CHECK(!function->shared()->is_compiled());
+ CHECK(!function->is_compiled());
// Call foo to get it recompiled.
CompileRun("foo()");
CHECK(function->shared()->is_compiled());
+ CHECK(function->is_compiled());
}
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
index 624b6e9391..e20ee8a360 100644
--- a/deps/v8/test/cctest/test-list.cc
+++ b/deps/v8/test/cctest/test-list.cc
@@ -99,3 +99,42 @@ TEST(ListAddAll) {
CHECK_EQ(i % 3, list[i]);
}
}
+
+
+TEST(RemoveLast) {
+ List<int> list(4);
+ CHECK_EQ(0, list.length());
+ list.Add(1);
+ CHECK_EQ(1, list.length());
+ CHECK_EQ(1, list.last());
+ list.RemoveLast();
+ CHECK_EQ(0, list.length());
+ list.Add(2);
+ list.Add(3);
+ CHECK_EQ(2, list.length());
+ CHECK_EQ(3, list.last());
+ list.RemoveLast();
+ CHECK_EQ(1, list.length());
+ CHECK_EQ(2, list.last());
+ list.RemoveLast();
+ CHECK_EQ(0, list.length());
+
+ const int kElements = 100;
+ for (int i = 0; i < kElements; i++) list.Add(i);
+ for (int j = kElements - 1; j >= 0; j--) {
+ CHECK_EQ(j + 1, list.length());
+ CHECK_EQ(j, list.last());
+ list.RemoveLast();
+ CHECK_EQ(j, list.length());
+ }
+}
+
+
+TEST(Clear) {
+ List<int> list(4);
+ CHECK_EQ(0, list.length());
+ for (int i = 0; i < 4; ++i) list.Add(i);
+ CHECK_EQ(4, list.length());
+ list.Clear();
+ CHECK_EQ(0, list.length());
+}
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 6da1a75972..c92117678c 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -1,4 +1,29 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of profiler-related functions from log.h
@@ -107,11 +132,17 @@ v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunction(
Address TraceExtension::GetFP(const v8::Arguments& args) {
- CHECK_EQ(1, args.Length());
- // CodeGenerator::GenerateGetFramePointer pushes EBP / RBP value
- // on stack. In 64-bit mode we can't use Smi operations code because
- // they check that value is within Smi bounds.
+ // Convert frame pointer from encoding as smis in the arguments to a pointer.
+ CHECK_EQ(2, args.Length()); // Ignore second argument on 32-bit platforms.
+#if defined(V8_HOST_ARCH_32_BIT)
Address fp = *reinterpret_cast<Address*>(*args[0]);
+#elif defined(V8_HOST_ARCH_64_BIT)
+ int64_t low_bits = *reinterpret_cast<uint64_t*>(*args[0]) >> 32;
+ int64_t high_bits = *reinterpret_cast<uint64_t*>(*args[1]);
+ Address fp = reinterpret_cast<Address>(high_bits | low_bits);
+#else
+#error Host architecture is neither 32-bit nor 64-bit.
+#endif
printf("Trace: %p\n", fp);
return fp;
}
@@ -210,51 +241,60 @@ static Handle<v8::internal::String> NewString(const char* s) {
}
-namespace v8 {
-namespace internal {
-
-class CodeGeneratorPatcher {
- public:
- CodeGeneratorPatcher() {
- CodeGenerator::InlineRuntimeLUT genGetFramePointer =
- {&CodeGenerator::GenerateGetFramePointer, "_GetFramePointer", 0};
- // _RandomHeapNumber is just used as a dummy function that has zero
- // arguments, the same as the _GetFramePointer function we actually patch
- // in.
- bool result = CodeGenerator::PatchInlineRuntimeEntry(
- NewString("_RandomHeapNumber"),
- genGetFramePointer, &oldInlineEntry);
- CHECK(result);
- }
-
- ~CodeGeneratorPatcher() {
- CHECK(CodeGenerator::PatchInlineRuntimeEntry(
- NewString("_GetFramePointer"),
- oldInlineEntry, NULL));
- }
+// This C++ function is called as a constructor to grab the frame pointer
+// from the calling function. When this function runs, the stack contains
+// a C_Entry frame and a Construct frame above the calling function's frame.
+static v8::Handle<Value> construct_call(const v8::Arguments& args) {
+ i::StackFrameIterator frame_iterator;
+ CHECK(frame_iterator.frame()->is_exit());
+ frame_iterator.Advance();
+ CHECK(frame_iterator.frame()->is_construct());
+ frame_iterator.Advance();
+ i::StackFrame* calling_frame = frame_iterator.frame();
+ CHECK(calling_frame->is_java_script());
+
+#if defined(V8_HOST_ARCH_32_BIT)
+ int32_t low_bits = reinterpret_cast<intptr_t>(calling_frame->fp());
+ args.This()->Set(v8_str("low_bits"), v8_num(low_bits >> 1));
+#elif defined(V8_HOST_ARCH_64_BIT)
+ int32_t low_bits = reinterpret_cast<uintptr_t>(calling_frame->fp());
+ int32_t high_bits = reinterpret_cast<uintptr_t>(calling_frame->fp()) >> 32;
+ args.This()->Set(v8_str("low_bits"), v8_num(low_bits));
+ args.This()->Set(v8_str("high_bits"), v8_num(high_bits));
+#else
+#error Host architecture is neither 32-bit nor 64-bit.
+#endif
+ return args.This();
+}
- private:
- CodeGenerator::InlineRuntimeLUT oldInlineEntry;
-};
-} } // namespace v8::internal
+// Use the API to create a JSFunction object that calls the above C++ function.
+void CreateFramePointerGrabberConstructor(const char* constructor_name) {
+ Local<v8::FunctionTemplate> constructor_template =
+ v8::FunctionTemplate::New(construct_call);
+ constructor_template->SetClassName(v8_str("FPGrabber"));
+ Local<Function> fun = constructor_template->GetFunction();
+ env->Global()->Set(v8_str(constructor_name), fun);
+}
// Creates a global function named 'func_name' that calls the tracing
// function 'trace_func_name' with an actual EBP register value,
-// shifted right to be presented as Smi.
+// encoded as one or two Smis.
static void CreateTraceCallerFunction(const char* func_name,
const char* trace_func_name) {
i::EmbeddedVector<char, 256> trace_call_buf;
- i::OS::SNPrintF(trace_call_buf, "%s(%%_GetFramePointer());", trace_func_name);
+ i::OS::SNPrintF(trace_call_buf,
+ "fp = new FPGrabber(); %s(fp.low_bits, fp.high_bits);",
+ trace_func_name);
+
+ // Create the FPGrabber function, which grabs the caller's frame pointer
+ // when called as a constructor.
+ CreateFramePointerGrabberConstructor("FPGrabber");
// Compile the script.
- i::CodeGeneratorPatcher patcher;
- bool allow_natives_syntax = i::FLAG_allow_natives_syntax;
- i::FLAG_allow_natives_syntax = true;
Handle<JSFunction> func = CompileFunction(trace_call_buf.start());
CHECK(!func.is_null());
- i::FLAG_allow_natives_syntax = allow_natives_syntax;
func->shared()->set_name(*NewString(func_name));
#ifdef DEBUG
@@ -273,11 +313,6 @@ static void CreateTraceCallerFunction(const char* func_name,
// StackTracer uses Top::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
- // TODO(711) The hack of replacing the inline runtime function
- // RandomHeapNumber with GetFrameNumber does not work with the way the full
- // compiler generates inline runtime calls.
- i::FLAG_always_full_compiler = false;
-
TickSample sample;
InitTraceEnv(&sample);
@@ -313,11 +348,6 @@ TEST(CFromJSStackTrace) {
// Top::c_entry_fp value. In this case, StackTracer uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
- // TODO(711) The hack of replacing the inline runtime function
- // RandomHeapNumber with GetFrameNumber does not work with the way the full
- // compiler generates inline runtime calls.
- i::FLAG_always_full_compiler = false;
-
TickSample sample;
InitTraceEnv(&sample);
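
The low_bits/high_bits protocol above exists because a raw 64-bit frame pointer cannot ride through JavaScript as a single smi. A simplified round-trip of the intent, ignoring the smi-tagging details that the real GetFP reads straight out of the argument's heap representation:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t fp = 0x00007f0012345678ULL;  // example frame pointer value
  // construct_call side: split the pointer into two 32-bit halves.
  uint32_t low_bits = static_cast<uint32_t>(fp);
  uint32_t high_bits = static_cast<uint32_t>(fp >> 32);
  // GetFP side: reassemble the pointer from the two halves.
  uint64_t decoded = (static_cast<uint64_t>(high_bits) << 32) | low_bits;
  assert(decoded == fp);
  return 0;
}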
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index d62b6a5d53..5ddd04416d 100755
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -31,7 +31,9 @@
#include "token.h"
#include "scanner.h"
+#include "parser.h"
#include "utils.h"
+#include "execution.h"
#include "cctest.h"
@@ -127,3 +129,113 @@ TEST(KeywordMatcher) {
CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
}
+
+TEST(ScanHTMLEndComments) {
+ // Regression test. See:
+ // http://code.google.com/p/chromium/issues/detail?id=53548
+ // Tests that --> is correctly interpreted as comment-to-end-of-line if there
+ // is only whitespace before it on the line, even after a multi-line
+ // comment. This was not the case if it occurred before the first real token
+ // in the input.
+ const char* tests[] = {
+ // Before first real token.
+ "--> is eol-comment\nvar y = 37;\n",
+ "\n --> is eol-comment\nvar y = 37;\n",
+ "/* precomment */ --> is eol-comment\nvar y = 37;\n",
+ "\n/* precomment */ --> is eol-comment\nvar y = 37;\n",
+ // After first real token.
+ "var x = 42;\n--> is eol-comment\nvar y = 37;\n",
+ "var x = 42;\n/* precomment */ --> is eol-comment\nvar y = 37;\n",
+ NULL
+ };
+
+ // Parser/Scanner needs a stack limit.
+ int marker;
+ i::StackGuard::SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ for (int i = 0; tests[i]; i++) {
+ v8::ScriptData* data =
+ v8::ScriptData::PreCompile(tests[i], strlen(tests[i]));
+ CHECK(data != NULL && !data->HasError());
+ delete data;
+ }
+}
+
+
+class ScriptResource : public v8::String::ExternalAsciiStringResource {
+ public:
+ ScriptResource(const char* data, size_t length)
+ : data_(data), length_(length) { }
+
+ const char* data() const { return data_; }
+ size_t length() const { return length_; }
+
+ private:
+ const char* data_;
+ size_t length_;
+};
+
+
+TEST(Preparsing) {
+ v8::HandleScope handles;
+ v8::Persistent<v8::Context> context = v8::Context::New();
+ v8::Context::Scope context_scope(context);
+ int marker;
+ i::StackGuard::SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ // Source containing functions that might be lazily compiled and all types
+ // of symbols (string, propertyName, regexp).
+ const char* source =
+ "var x = 42;"
+ "function foo(a) { return function nolazy(b) { return a + b; } }"
+ "function bar(a) { if (a) return function lazy(b) { return b; } }"
+ "var z = {'string': 'string literal', bareword: 'propertyName', "
+ " 42: 'number literal', for: 'keyword as propertyName', "
+ " f\\u006fr: 'keyword propertyname with escape'};"
+ "var v = /RegExp Literal/;"
+ "var w = /RegExp Literal\\u0020With Escape/gin;"
+ "var y = { get getter() { return 42; }, "
+ " set setter(v) { this.value = v; }};";
+ int source_length = strlen(source);
+ const char* error_source = "var x = y z;";
+ int error_source_length = strlen(error_source);
+
+ v8::ScriptData* preparse =
+ v8::ScriptData::PreCompile(source, source_length);
+ CHECK(!preparse->HasError());
+ bool lazy_flag = i::FLAG_lazy;
+ {
+ i::FLAG_lazy = true;
+ ScriptResource* resource = new ScriptResource(source, source_length);
+ v8::Local<v8::String> script_source = v8::String::NewExternal(resource);
+ v8::Script::Compile(script_source, NULL, preparse);
+ }
+
+ {
+ i::FLAG_lazy = false;
+
+ ScriptResource* resource = new ScriptResource(source, source_length);
+ v8::Local<v8::String> script_source = v8::String::NewExternal(resource);
+ v8::Script::New(script_source, NULL, preparse, v8::Local<v8::String>());
+ }
+ delete preparse;
+ i::FLAG_lazy = lazy_flag;
+
+ // Syntax error.
+ v8::ScriptData* error_preparse =
+ v8::ScriptData::PreCompile(error_source, error_source_length);
+ CHECK(error_preparse->HasError());
+ i::ScriptDataImpl* pre_impl =
+ reinterpret_cast<i::ScriptDataImpl*>(error_preparse);
+ i::Scanner::Location error_location =
+ pre_impl->MessageLocation();
+ // Error is at "z" in source, location 10..11.
+ CHECK_EQ(10, error_location.beg_pos);
+ CHECK_EQ(11, error_location.end_pos);
+ // Should not crash.
+ const char* message = pre_impl->BuildMessage();
+ i::Vector<const char*> args = pre_impl->BuildArgs();
+ CHECK_GT(strlen(message), 0);
+}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index ea477de661..b36220284f 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -775,4 +775,21 @@ TEST(RecordStackTraceAtStartProfiling) {
CHECK_EQ(0, current->children()->length());
}
+
+TEST(Issue51919) {
+ CpuProfilesCollection collection;
+ i::EmbeddedVector<char*,
+ CpuProfilesCollection::kMaxSimultaneousProfiles> titles;
+ for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
+ i::Vector<char> title = i::Vector<char>::New(16);
+ i::OS::SNPrintF(title, "%d", i);
+ CHECK(collection.StartProfiling(title.start(), i + 1)); // UID must be > 0.
+ titles[i] = title.start();
+ }
+ CHECK(!collection.StartProfiling(
+ "maximum", CpuProfilesCollection::kMaxSimultaneousProfiles + 1));
+ for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
+ i::DeleteArray(titles[i]);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 3ec25c9f47..20fb2fe6c1 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -98,13 +98,6 @@ static int make_code(TypeCode type, int id) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-static int register_code(int reg) {
- return Debug::k_register_address << kDebugIdShift | reg;
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
TEST(ExternalReferenceEncoder) {
StatsTable::SetCounterFunction(counter_function);
Heap::Setup(false);
@@ -115,10 +108,6 @@ TEST(ExternalReferenceEncoder) {
Encode(encoder, Runtime::kAbort));
CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- CHECK_EQ(make_code(DEBUG_ADDRESS, register_code(3)),
- Encode(encoder, Debug_Address(Debug::k_register_address, 3)));
-#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function_prototype =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
@@ -156,10 +145,6 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort)));
CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- CHECK_EQ(AddressOf(Debug_Address(Debug::k_register_address, 3)),
- decoder.Decode(make_code(DEBUG_ADDRESS, register_code(3))));
-#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(keyed_load_function.address(),
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index bcb185d24f..88ef0a204d 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -131,3 +131,64 @@ TEST(MemCopy) {
buffer2.Dispose();
buffer1.Dispose();
}
+
+
+TEST(Collector) {
+ Collector<int> collector(8);
+ const int kLoops = 5;
+ const int kSequentialSize = 1000;
+ const int kBlockSize = 7;
+ for (int loop = 0; loop < kLoops; loop++) {
+ Vector<int> block = collector.AddBlock(kBlockSize, 0xbadcafe);
+ for (int i = 0; i < kSequentialSize; i++) {
+ collector.Add(i);
+ }
+ for (int i = 0; i < kBlockSize - 1; i++) {
+ block[i] = i * 7;
+ }
+ }
+ Vector<int> result = collector.ToVector();
+ CHECK_EQ(kLoops * (kBlockSize + kSequentialSize), result.length());
+ for (int i = 0; i < kLoops; i++) {
+ int offset = i * (kSequentialSize + kBlockSize);
+ for (int j = 0; j < kBlockSize - 1; j++) {
+ CHECK_EQ(j * 7, result[offset + j]);
+ }
+ CHECK_EQ(0xbadcafe, result[offset + kBlockSize - 1]);
+ for (int j = 0; j < kSequentialSize; j++) {
+ CHECK_EQ(j, result[offset + kBlockSize + j]);
+ }
+ }
+ result.Dispose();
+}
+
+
+TEST(SequenceCollector) {
+ SequenceCollector<int> collector(8);
+ const int kLoops = 5000;
+ const int kMaxSequenceSize = 13;
+ int total_length = 0;
+ for (int loop = 0; loop < kLoops; loop++) {
+ int seq_length = loop % kMaxSequenceSize;
+ collector.StartSequence();
+ for (int j = 0; j < seq_length; j++) {
+ collector.Add(j);
+ }
+ Vector<int> sequence = collector.EndSequence();
+ for (int j = 0; j < seq_length; j++) {
+ CHECK_EQ(j, sequence[j]);
+ }
+ total_length += seq_length;
+ }
+ Vector<int> result = collector.ToVector();
+ CHECK_EQ(total_length, result.length());
+ int offset = 0;
+ for (int loop = 0; loop < kLoops; loop++) {
+ int seq_length = loop % kMaxSequenceSize;
+ for (int j = 0; j < seq_length; j++) {
+ CHECK_EQ(j, result[offset]);
+ offset++;
+ }
+ }
+ result.Dispose();
+}
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index c2427c8dc5..485f2cfdb7 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -31,15 +31,12 @@ from os.path import join, dirname, exists
import platform
import utils
-CCTEST_DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap']
-
class CcTestCase(test.TestCase):
def __init__(self, path, executable, mode, raw_name, dependency, context):
- super(CcTestCase, self).__init__(context, path)
+ super(CcTestCase, self).__init__(context, path, mode)
self.executable = executable
- self.mode = mode
self.raw_name = raw_name
self.dependency = dependency
@@ -54,8 +51,7 @@ class CcTestCase(test.TestCase):
serialization_file += '_' + self.GetName()
serialization_option = '--testing_serialization_file=' + serialization_file
result = [ self.executable, name, serialization_option ]
- if self.mode == 'debug':
- result += CCTEST_DEBUG_FLAGS
+ result += self.context.GetVmFlags(self, self.mode)
return result
def GetCommand(self):
diff --git a/deps/v8/test/es5conform/testcfg.py b/deps/v8/test/es5conform/testcfg.py
index d1f23aa312..43d6104719 100644
--- a/deps/v8/test/es5conform/testcfg.py
+++ b/deps/v8/test/es5conform/testcfg.py
@@ -37,9 +37,8 @@ HARNESS_FILES = ['sth.js']
class ES5ConformTestCase(test.TestCase):
def __init__(self, filename, path, context, root, mode, framework):
- super(ES5ConformTestCase, self).__init__(context, path)
+ super(ES5ConformTestCase, self).__init__(context, path, mode)
self.filename = filename
- self.mode = mode
self.framework = framework
self.root = root
@@ -55,7 +54,7 @@ class ES5ConformTestCase(test.TestCase):
return 'FAILED!' in output.stdout
def GetCommand(self):
- result = [self.context.GetVm(self.mode)]
+ result = self.context.GetVmCommand(self, self.mode)
result += ['-e', 'var window = this']
result += self.framework
result.append(self.filename)
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 6004282bf6..7dae047d51 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -35,11 +35,10 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class MessageTestCase(test.TestCase):
def __init__(self, path, file, expected, mode, context, config):
- super(MessageTestCase, self).__init__(context, path)
+ super(MessageTestCase, self).__init__(context, path, mode)
self.file = file
self.expected = expected
self.config = config
- self.mode = mode
def IgnoreLine(self, str):
"""Ignore empty lines and valgrind output."""
@@ -79,7 +78,7 @@ class MessageTestCase(test.TestCase):
return self.path[-1]
def GetCommand(self):
- result = [self.config.context.GetVm(self.mode)]
+ result = self.config.context.GetVmCommand(self, self.mode)
source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index 88c4876496..68dd9b2baf 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -67,13 +67,8 @@
(function() {
var array;
for (var i = 0; i < 7; i++) {
- // SpiderMonkey and JSC return undefined in the case where no
- // arguments are given instead of using the implicit undefined
- // arguments. This does not follow ECMA-262, but we do the same for
- // compatibility.
- // TraceMonkey follows ECMA-262 though.
array = [1, 2, 3]
- assertEquals(undefined, array.splice());
+ assertEquals([], array.splice());
assertEquals([1, 2, 3], array);
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
diff --git a/deps/v8/test/mjsunit/binary-op-newspace.js b/deps/v8/test/mjsunit/binary-op-newspace.js
index 8034209806..40d53b942c 100644
--- a/deps/v8/test/mjsunit/binary-op-newspace.js
+++ b/deps/v8/test/mjsunit/binary-op-newspace.js
@@ -25,21 +25,38 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-/**
- * @fileoverview Check that a mod where the stub code hits a failure
- * in heap number allocation still works.
- */
-
// Flags: --max-new-space-size=262144
+
+// Check that a mod where the stub code hits a failure in heap number
+// allocation still works.
+
function f(x) {
return x % 3;
}
-function test() {
+function testMod() {
for (var i = 0; i < 40000; i++) {
assertEquals(-1 / 0, 1 / f(-3));
}
}
-test();
+testMod();
+
+
+// Check that an add where the stub code hits a failure in heap number
+// allocation still works.
+
+function g(x, y) {
+ return x + y;
+}
+
+function testAdd() {
+ var lhs = 17.42;
+ var rhs = 42.17;
+ for (var i = 0; i < 40000; i++) {
+ assertEquals(59.59, g(lhs, rhs));
+ }
+}
+
+testAdd();
diff --git a/deps/v8/test/mjsunit/const-eval-init.js b/deps/v8/test/mjsunit/const-eval-init.js
index 5bcd9175ee..3f380d9d94 100644
--- a/deps/v8/test/mjsunit/const-eval-init.js
+++ b/deps/v8/test/mjsunit/const-eval-init.js
@@ -67,7 +67,9 @@ function testAssignmentArgument(x) {
assertEquals(7, x);
}
-testAssignmentArgument();
+for (var i = 0; i < 10000; i++) {
+ testAssignmentArgument();
+}
assertEquals(6, x);
__defineSetter__('x', function() { throw 42; });
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index 11ac2e0bef..901c190c80 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -176,7 +176,11 @@ var knownProblems = {
"_GetFromCache": true,
// This function expects its first argument to be a non-smi.
- "_IsStringWrapperSafeForDefaultValueOf" : true
+ "_IsStringWrapperSafeForDefaultValueOf" : true,
+
+ // Only applicable to strings.
+ "_HasCachedArrayIndex": true,
+ "_GetCachedArrayIndex": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 945b66249b..5353d6c548 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -317,3 +317,32 @@ TestInvalid('1); x++; (1');
// Test string conversion of argument.
var o = { toString: function() { return "42"; } };
assertEquals(42, JSON.parse(o));
+
+
+for (var i = 0; i < 65536; i++) {
+ var string = String.fromCharCode(i);
+ var encoded = JSON.stringify(string);
+ var expected = "uninitialized";
+ // Following the ES5 specification of the abstraction function Quote.
+ if (string == '"' || string == '\\') {
+ // Step 2.a
+ expected = '\\' + string;
+ } else if ("\b\t\n\r\f".indexOf(string) >= 0) {
+ // Step 2.b
+ if (string == '\b') expected = '\\b';
+ else if (string == '\t') expected = '\\t';
+ else if (string == '\n') expected = '\\n';
+ else if (string == '\f') expected = '\\f';
+ else if (string == '\r') expected = '\\r';
+ } else if (i < 32) {
+ // Step 2.c
+ if (i < 16) {
+ expected = "\\u000" + i.toString(16);
+ } else {
+ expected = "\\u00" + i.toString(16);
+ }
+ } else {
+ expected = string;
+ }
+ assertEquals('"' + expected + '"', encoded, "Codepoint " + i);
+}
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index a8891969f4..db8b13388e 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -484,3 +484,21 @@ assertRegExpTest(/[,b]\b[,b]/, ",b", true);
assertRegExpTest(/[,b]\B[,b]/, ",b", false);
assertRegExpTest(/[,b]\b[,b]/, "b,", true);
assertRegExpTest(/[,b]\B[,b]/, "b,", false);
+
+// Test that caching of result doesn't share result objects.
+// More iterations increases the chance of hitting a GC.
+for (var i = 0; i < 100; i++) {
+ var re = /x(y)z/;
+ var res = re.exec("axyzb");
+ assertTrue(!!res);
+ assertEquals(2, res.length);
+ assertEquals("xyz", res[0]);
+ assertEquals("y", res[1]);
+ assertEquals(1, res.index);
+ assertEquals("axyzb", res.input);
+ assertEquals(undefined, res.foobar);
+
+ res.foobar = "Arglebargle";
+ res[3] = "Glopglyf";
+ assertEquals("Arglebargle", res.foobar);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-842.js b/deps/v8/test/mjsunit/regress/regress-842.js
new file mode 100644
index 0000000000..18ad6d3d15
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-842.js
@@ -0,0 +1,42 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Bug 842 describes a scenario where Object.prototype or Array.prototype is
+// changed (a property is added), after which freeze and seal would fail,
+// since that property would be listed when doing a "for (var key in names)".
+
+Array.prototype.myfunc = function() {};
+Array.prototype[10] = 42;
+Array.prototype.length = 3000;
+
+var obj = { name: "n1" };
+
+try {
+ obj = Object.freeze(obj);
+} catch (e) {
+ assertUnreachable();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-851.js b/deps/v8/test/mjsunit/regress/regress-851.js
new file mode 100644
index 0000000000..d8f693ee8c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-851.js
@@ -0,0 +1,32 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+for (var i = 0; i < 10000; i++) {
+ Object.freeze({});
+ assertNull(JSON.stringify({x: null}).match(/\0/));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-push-args-twice.js b/deps/v8/test/mjsunit/regress/regress-push-args-twice.js
new file mode 100644
index 0000000000..faa6007ca7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-push-args-twice.js
@@ -0,0 +1,37 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that the ADD binary op stub correctly handles non-number arguments
+// passed on registers.
+
+try {
+ for (var key = 0; key != 10; key++) {
+ var x = 1 + undefined;
+ }
+} catch(e) {
+ fail("no exception", e);
+}
diff --git a/deps/v8/test/mjsunit/shifts.js b/deps/v8/test/mjsunit/shifts.js
new file mode 100644
index 0000000000..b91b3e8a00
--- /dev/null
+++ b/deps/v8/test/mjsunit/shifts.js
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nofull-compiler
+
+// Test a few corner cases with shifts.
+
+// The result of the shift is not a Smi.
+var s1 = 0x3fffffff;
+assertEquals(0x7fffffff, (s1 << 1) + 1);
+
+// The result of the shift is not a Smi.
+var s2 = -1;
+assertEquals(0xffffffff, (s2 >>> 0));
diff --git a/deps/v8/test/mjsunit/str-to-num.js b/deps/v8/test/mjsunit/str-to-num.js
index 20e298688c..28e98d9fb4 100644
--- a/deps/v8/test/mjsunit/str-to-num.js
+++ b/deps/v8/test/mjsunit/str-to-num.js
@@ -203,3 +203,7 @@ assertTrue(isNaN(toNumber("Infinity junk")), "Infinity junk");
assertTrue(isNaN(toNumber("1e")), "1e");
assertTrue(isNaN(toNumber("1e ")), "1e_");
assertTrue(isNaN(toNumber("1" + repeat('0', 1000) + 'junk')), "1e1000 junk");
+
+for (var i = 1; i < 12; i++) {
+ assertEquals(toNumber('1' + repeat('0', i)), Math.pow(10.0, i));
+}
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 49064b12fe..d8fe24d37a 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -31,7 +31,6 @@ from os.path import join, dirname, exists
import re
import tempfile
-MJSUNIT_DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap']
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
@@ -40,10 +39,9 @@ SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
class MjsunitTestCase(test.TestCase):
def __init__(self, path, file, mode, context, config):
- super(MjsunitTestCase, self).__init__(context, path)
+ super(MjsunitTestCase, self).__init__(context, path, mode)
self.file = file
self.config = config
- self.mode = mode
self.self_script = False
def GetLabel(self):
@@ -53,13 +51,11 @@ class MjsunitTestCase(test.TestCase):
return self.path[-1]
def GetCommand(self):
- result = [self.config.context.GetVm(self.mode)]
+ result = self.config.context.GetVmCommand(self, self.mode)
source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
result += flags_match.group(1).strip().split()
- if self.mode == 'debug':
- result += MJSUNIT_DEBUG_FLAGS
additional_files = []
files_match = FILES_PATTERN.search(source);
# Accept several lines of 'Files:'
@@ -94,8 +90,8 @@ class MjsunitTestCase(test.TestCase):
self.self_script = self_script
return self_script
- def Cleanup(self):
- if self.self_script:
+ def AfterRun(self, result):
+ if self.self_script and (not result.HasPreciousOutput()):
test.CheckedUnlink(self.self_script)
class MjsunitTestConfiguration(test.TestConfiguration):
diff --git a/deps/v8/test/mjsunit/third_party/array-splice-webkit.js b/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
index b676a7c165..974ac55e6b 100644
--- a/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
+++ b/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
@@ -38,7 +38,7 @@ assertArrayEquals(['a','b'], arr.splice(0));
assertArrayEquals([], arr)
arr = ['a','b','c','d'];
-assertEquals(undefined, arr.splice())
+assertEquals([], arr.splice())
assertArrayEquals(['a','b','c','d'], arr);
assertArrayEquals(['a','b','c','d'], arr.splice(undefined))
assertArrayEquals([], arr);
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 28fc063154..1768c3975d 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -235,11 +235,6 @@ ecma_3/Number/15.7.4.7-1: FAIL_OK
# toExponential argument restricted to range 0..20 in JSC/V8
ecma_3/Number/15.7.4.6-1: FAIL_OK
-# Array.prototype.slice with zero arguments return undefined in JSC/V8,
-# empty array in Spider/TraceMonkey.
-js1_5/Array/regress-451483: FAIL_OK
-
-
#:=== RegExp:===
# To be compatible with JSC we silently ignore flags that do not make
# sense. These tests expects us to throw exceptions.
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index d1c1767a9a..7a6438f17b 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -57,9 +57,8 @@ TEST_DIRS = """
class MozillaTestCase(test.TestCase):
def __init__(self, filename, path, context, root, mode, framework):
- super(MozillaTestCase, self).__init__(context, path)
+ super(MozillaTestCase, self).__init__(context, path, mode)
self.filename = filename
- self.mode = mode
self.framework = framework
self.root = root
@@ -75,8 +74,8 @@ class MozillaTestCase(test.TestCase):
return 'FAILED!' in output.stdout
def GetCommand(self):
- result = [self.context.GetVm(self.mode), '--expose-gc',
- join(self.root, 'mozilla-shell-emulation.js')]
+ result = self.context.GetVmCommand(self, self.mode) + \
+ [ '--expose-gc', join(self.root, 'mozilla-shell-emulation.js') ]
result += self.framework
result.append(self.filename)
return result
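Same refactoring as in the mjsunit configuration: the VM name and flags are now composed centrally. Under the SUFFIX and FLAGS tables added to tools/test.py below, a debug-mode command expands roughly as in this sketch:

    SUFFIX = {'debug': '_g', 'release': ''}
    FLAGS = {'debug': ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
             'release': []}

    def vm_command(vm_root, mode, variant_flags):
        # Mirrors Context.GetVm + GetVmFlags.
        return [vm_root + SUFFIX[mode]] + variant_flags + FLAGS[mode]

    print(vm_command('shell', 'debug', []))
    # ['shell_g', '--enable-slow-asserts', '--debug-code', '--verify-heap']
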
diff --git a/deps/v8/test/sputnik/testcfg.py b/deps/v8/test/sputnik/testcfg.py
index 659238220b..f7a5edcca6 100644
--- a/deps/v8/test/sputnik/testcfg.py
+++ b/deps/v8/test/sputnik/testcfg.py
@@ -36,9 +36,8 @@ import time
class SputnikTestCase(test.TestCase):
def __init__(self, case, path, context, mode):
- super(SputnikTestCase, self).__init__(context, path)
+ super(SputnikTestCase, self).__init__(context, path, mode)
self.case = case
- self.mode = mode
self.tmpfile = None
self.source = None
@@ -56,12 +55,13 @@ class SputnikTestCase(test.TestCase):
self.tmpfile.Write(self.GetSource())
self.tmpfile.Close()
- def AfterRun(self):
- self.tmpfile.Dispose()
+ def AfterRun(self, result):
+ # Dispose of the temporary file unless its output needs to be kept.
+ if not result.HasPreciousOutput(): self.tmpfile.Dispose()
self.tmpfile = None
def GetCommand(self):
- result = [self.context.GetVm(self.mode)]
+ result = self.context.GetVmCommand(self, self.mode)
result.append(self.tmpfile.name)
return result
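The Sputnik configuration follows the same pattern, with one twist: the temporary file holding the generated test source is disposed of only when the run produced no precious output, so a failing case can be re-run by hand from the preserved file. In outline (a sketch, not the harness itself):

    def after_run(test, result):
        # Keep the temp file on disk when the output warrants inspection.
        if not result.HasPreciousOutput():
            test.tmpfile.Dispose()
        test.tmpfile = None
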
diff --git a/deps/v8/tools/gc-nvp-trace-processor.py b/deps/v8/tools/gc-nvp-trace-processor.py
index f1f9dc01c7..2c173ab56e 100755
--- a/deps/v8/tools/gc-nvp-trace-processor.py
+++ b/deps/v8/tools/gc-nvp-trace-processor.py
@@ -216,7 +216,7 @@ def reclaimed_bytes(row):
return row['total_size_before'] - row['total_size_after']
def other_scope(r):
- return r['pause'] - r['mark'] - r['sweep'] - r['compact'] - r['flushcode']
+ return r['pause'] - r['mark'] - r['sweep'] - r['compact']
plots = [
[
@@ -226,7 +226,6 @@ plots = [
Plot(Item('Marking', 'mark', lc = 'purple'),
Item('Sweep', 'sweep', lc = 'blue'),
Item('Compaction', 'compact', lc = 'red'),
- Item('Flush Code', 'flushcode', lc = 'yellow'),
Item('Other', other_scope, lc = 'grey'))
],
[
@@ -288,7 +287,10 @@ def process_trace(filename):
n = len(trace)
total = calc_total(trace, field)
max = calc_max(trace, field)
- avg = total / n
+ if n > 0:
+ avg = total / n
+ else:
+ avg = 0
if n > 1:
dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
(n - 1))
@@ -303,14 +305,14 @@ def process_trace(filename):
with open(filename + '.html', 'w') as out:
out.write('<html><body>')
out.write('<table>')
- out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td><td>Max</td><td>Avg</td></tr>')
+ out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td>')
+ out.write('<td>Max</td><td>Avg</td></tr>')
stats(out, 'Total in GC', trace, 'pause')
stats(out, 'Scavenge', scavenges, 'pause')
stats(out, 'MarkSweep', marksweeps, 'pause')
stats(out, 'MarkCompact', markcompacts, 'pause')
stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
- stats(out, 'Flush Code', filter(lambda r: r['flushcode'] != 0, trace), 'flushcode')
stats(out, 'Compact', filter(lambda r: r['compact'] != 0, trace), 'compact')
out.write('</table>')
for chart in charts:
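Two robustness fixes in the trace processor: the removed flushcode scope no longer contributes to 'other' time, and the average is guarded so an empty trace yields 0 instead of raising ZeroDivisionError on total / n. The deviation already used the sample form with n - 1. The guarded statistics, distilled:

    import math

    def stats(values):
        n = len(values)
        total = sum(values)
        avg = total / n if n > 0 else 0
        if n > 1:
            dev = math.sqrt(sum((v - avg) ** 2 for v in values) / (n - 1))
        else:
            dev = 0
        return total, max(values) if values else 0, avg, dev

    print(stats([]))         # (0, 0, 0, 0), not a crash
    print(stats([2, 4, 6]))  # (12, 6, 4.0, 2.0)
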
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 47f95026dd..b355fb6fe8 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -343,8 +343,6 @@
'../../src/fixed-dtoa.h',
'../../src/flags.cc',
'../../src/flags.h',
- '../../src/flow-graph.cc',
- '../../src/flow-graph.h',
'../../src/frame-element.cc',
'../../src/frame-element.h',
'../../src/frames-inl.h',
@@ -495,6 +493,8 @@
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
+ '../../src/arm/code-stubs-arm.cc',
+ '../../src/arm/code-stubs-arm.h',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
@@ -541,6 +541,8 @@
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
+ '../../src/ia32/code-stubs-ia32.cc',
+ '../../src/ia32/code-stubs-ia32.h',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
@@ -575,6 +577,8 @@
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
'../../src/x64/builtins-x64.cc',
+ '../../src/x64/code-stubs-x64.cc',
+ '../../src/x64/code-stubs-x64.h',
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
diff --git a/deps/v8/tools/oom_dump/README b/deps/v8/tools/oom_dump/README
index 5adbf65a0b..0be75116a0 100644
--- a/deps/v8/tools/oom_dump/README
+++ b/deps/v8/tools/oom_dump/README
@@ -1,4 +1,4 @@
-oom_dump extracts useful information from Google Chrome OOM minidumps.
+oom_dump extracts useful information from Google Chrome OOM minidumps.
To build one needs a google-breakpad checkout
(http://code.google.com/p/google-breakpad/).
@@ -15,7 +15,7 @@ need some additional tweaking to make it discoverable, for example,
put a soft link into /usr/lib directory).
Next step is to build v8. Note: you should build x64 version of v8,
-if you're on 64-bit platform, otherwise you would get link error when
+if you're on 64-bit platform, otherwise you would get a link error when
building oom_dump.
The last step is to build oom_dump itself. The following command should work:
@@ -23,8 +23,9 @@ The last step is to build oom_dump itself. The following command should work:
cd <v8 working copy>/tools/oom_dump
scons BREAKPAD_DIR=<path to google-breakpad working copy>
-(Additionally you can control v8 working copy dir, but default---../..---
-should work just fine).
+(Additionally you can control v8 working copy dir, but the default should work.)
If everything goes fine, oom_dump <path to minidump> should print
-some useful information about OOM crash.
+some useful information about the OOM crash.
+
+Note: currently only 32-bit Windows minidumps are supported.
diff --git a/deps/v8/tools/oom_dump/oom_dump.cc b/deps/v8/tools/oom_dump/oom_dump.cc
index 01f6005cbc..1bf5ac19fc 100644
--- a/deps/v8/tools/oom_dump/oom_dump.cc
+++ b/deps/v8/tools/oom_dump/oom_dump.cc
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
+#include <algorithm>
+
#include <google_breakpad/processor/minidump.h>
-#include <processor/logging.h>
#define ENABLE_DEBUGGER_SUPPORT
@@ -128,6 +128,9 @@ void DumpHeapStats(const char *minidump_file) {
thread_list->GetThreadByID(exception_thread_id);
CHECK(exception_thread);
+ // Currently only 32-bit Windows minidumps are supported.
+ CHECK_EQ(MD_CONTEXT_X86, crash_context->GetContextCPU());
+
const MDRawContextX86* contextX86 = crash_context->GetContextX86();
CHECK(contextX86);
@@ -145,7 +148,7 @@ void DumpHeapStats(const char *minidump_file) {
if (value >= esp && value < last) {
u_int32_t value2 = 0;
CHECK(memory_region->GetMemoryAtAddress(value, &value2));
- if (value2 == 0xdecade00) {
+ if (value2 == v8::internal::HeapStats::kStartMarker) {
heap_stats_addr = addr;
break;
}
@@ -158,8 +161,8 @@ void DumpHeapStats(const char *minidump_file) {
#define READ_FIELD(offset) \
ReadPointedValue(memory_region, heap_stats_addr, offset)
- CHECK(READ_FIELD(0) == 0xdecade00);
- CHECK(READ_FIELD(23) == 0xdecade01);
+ CHECK(READ_FIELD(0) == v8::internal::HeapStats::kStartMarker);
+ CHECK(READ_FIELD(24) == v8::internal::HeapStats::kEndMarker);
const int new_space_size = READ_FIELD(1);
const int new_space_capacity = READ_FIELD(2);
@@ -181,6 +184,7 @@ void DumpHeapStats(const char *minidump_file) {
const int destroyed_global_handle_count = READ_FIELD(18);
const int memory_allocator_size = READ_FIELD(19);
const int memory_allocator_capacity = READ_FIELD(20);
+ const int os_error = READ_FIELD(23);
#undef READ_FIELD
int objects_per_type[v8::internal::LAST_TYPE + 1] = {0};
@@ -213,9 +217,9 @@ void DumpHeapStats(const char *minidump_file) {
// Print heap stats.
- printf("exception thread ID: %d (%x)\n",
+ printf("exception thread ID: %" PRIu32 " (%#" PRIx32 ")\n",
exception_thread_id, exception_thread_id);
- printf("heap stats address: %p\n", (void*)heap_stats_addr);
+ printf("heap stats address: %#" PRIx64 "\n", heap_stats_addr);
#define PRINT_INT_STAT(stat) \
printf("\t%-25s\t% 10d\n", #stat ":", stat);
#define PRINT_MB_STAT(stat) \
@@ -240,6 +244,7 @@ void DumpHeapStats(const char *minidump_file) {
PRINT_INT_STAT(destroyed_global_handle_count);
PRINT_MB_STAT(memory_allocator_size);
PRINT_MB_STAT(memory_allocator_capacity);
+ PRINT_INT_STAT(os_error);
#undef PRINT_STAT
printf("\n");
@@ -255,15 +260,15 @@ void DumpHeapStats(const char *minidump_file) {
const char* name = InstanceTypeToString(type);
if (name == NULL) {
// Unknown instance type. Check that there is no objects of that type.
- CHECK(objects_per_type[type] == 0);
- CHECK(size_per_type[type] == 0);
+ CHECK_EQ(0, objects_per_type[type]);
+ CHECK_EQ(0, size_per_type[type]);
continue;
}
int size = size_per_type[type];
running_size += size;
printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
name, objects_per_type[type], toM(size),
- 100.*size/total_size, 100.*running_size/total_size);
+ 100. * size / total_size, 100. * running_size / total_size);
}
printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
"total", 0, toM(total_size), 100., 100.);
@@ -272,8 +277,6 @@ void DumpHeapStats(const char *minidump_file) {
} // namespace
int main(int argc, char **argv) {
- BPLOG_INIT(&argc, &argv);
-
if (argc != 2) {
fprintf(stderr, "usage: %s <minidump>\n", argv[0]);
return 1;
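The tool now finds the HeapStats block by scanning the exception thread's stack for HeapStats::kStartMarker rather than the literal 0xdecade00, checks the end marker at field 24 (it moved from 23 when the new os_error field took slot 23), and rejects non-x86 minidumps early. The scan itself is simple; a Python sketch over a raw stack buffer, assuming little-endian 32-bit words and using the old literal as the marker value:

    import struct

    START_MARKER = 0xDECADE00  # value formerly hard-coded in oom_dump.cc

    def find_heap_stats(stack, esp):
        # Each stack slot may hold a pointer back into the stack; if the
        # pointed-to word is the start marker, that slot locates HeapStats.
        for off in range(0, len(stack) - 3, 4):
            value = struct.unpack_from('<I', stack, off)[0]
            rel = value - esp
            if 0 <= rel <= len(stack) - 4:
                if struct.unpack_from('<I', stack, rel)[0] == START_MARKER:
                    return esp + off  # stack address holding the pointer
        return None
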
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index f17e9b1c34..4b916f8599 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -331,10 +331,11 @@ class CommandOutput(object):
class TestCase(object):
- def __init__(self, context, path):
+ def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
+ self.mode = mode
def IsNegative(self):
return False
@@ -355,14 +356,19 @@ class TestCase(object):
def RunCommand(self, command):
full_command = self.context.processor(command)
- output = Execute(full_command, self.context, self.context.timeout)
+ output = Execute(full_command,
+ self.context,
+ self.context.GetTimeout(self.mode))
self.Cleanup()
- return TestOutput(self, full_command, output)
+ return TestOutput(self,
+ full_command,
+ output,
+ self.context.store_unexpected_output)
def BeforeRun(self):
pass
- def AfterRun(self):
+ def AfterRun(self, result):
pass
def Run(self):
@@ -370,7 +376,7 @@ class TestCase(object):
try:
result = self.RunCommand(self.GetCommand())
finally:
- self.AfterRun()
+ self.AfterRun(result)
return result
def Cleanup(self):
@@ -379,10 +385,11 @@ class TestCase(object):
class TestOutput(object):
- def __init__(self, test, command, output):
+ def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
+ self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
@@ -395,6 +402,9 @@ class TestOutput(object):
outcome = PASS
return not outcome in self.test.outcomes
+ def HasPreciousOutput(self):
+ return self.UnexpectedOutput() and self.store_unexpected_output
+
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
@@ -557,6 +567,11 @@ class TestSuite(object):
return self.name
+# Use this to run several variants of the tests, e.g.:
+# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
+VARIANT_FLAGS = [[]]
+
+
class TestRepository(TestSuite):
def __init__(self, path):
@@ -583,8 +598,12 @@ class TestRepository(TestSuite):
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
- def ListTests(self, current_path, path, context, mode):
- return self.GetConfiguration(context).ListTests(current_path, path, mode)
+ def AddTestsToList(self, result, current_path, path, context, mode):
+ for v in VARIANT_FLAGS:
+ tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
+ for t in tests: t.variant_flags = v
+ result += tests
+
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
@@ -611,7 +630,7 @@ class LiteralTestSuite(TestSuite):
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
- result += test.ListTests(full_path, path, context, mode)
+ test.AddTestsToList(result, full_path, path, context, mode)
return result
def GetTestStatus(self, context, sections, defs):
@@ -619,12 +638,20 @@ class LiteralTestSuite(TestSuite):
test.GetTestStatus(context, sections, defs)
-SUFFIX = {'debug': '_g', 'release': ''}
+SUFFIX = {
+ 'debug' : '_g',
+ 'release' : '' }
+FLAGS = {
+ 'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
+ 'release' : []}
+TIMEOUT_SCALEFACTOR = {
+ 'debug' : 4,
+ 'release' : 1 }
class Context(object):
- def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs):
+ def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
@@ -632,6 +659,7 @@ class Context(object):
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
+ self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
name = self.vm_root + SUFFIX[mode]
@@ -639,6 +667,15 @@ class Context(object):
name = name + '.exe'
return name
+ def GetVmCommand(self, testcase, mode):
+ return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
+
+ def GetVmFlags(self, testcase, mode):
+ return testcase.variant_flags + FLAGS[mode]
+
+ def GetTimeout(self, mode):
+ return self.timeout * TIMEOUT_SCALEFACTOR[mode]
+
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
@@ -1121,7 +1158,13 @@ def BuildOptions():
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
- result.add_option("--shell", help="Path to V8 shell", default="shell");
+ result.add_option("--shell", help="Path to V8 shell", default="shell")
+ result.add_option("--store-unexpected-output",
+ help="Store the temporary JS files from tests that fails",
+ dest="store_unexpected_output", default=True, action="store_true")
+ result.add_option("--no-store-unexpected-output",
+ help="Deletes the temporary JS files from tests that fails",
+ dest="store_unexpected_output", action="store_false")
return result
@@ -1258,11 +1301,13 @@ def Main():
shell = abspath(options.shell)
buildspace = dirname(shell)
+
context = Context(workspace, buildspace, VERBOSE,
shell,
options.timeout,
GetSpecialCommandProcessor(options.special_command),
- options.suppress_dialogs)
+ options.suppress_dialogs,
+ options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
@@ -1278,7 +1323,7 @@ def Main():
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
-
+
# Get status for tests
sections = [ ]
defs = { }
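tools/test.py now owns every mode-dependent knob: FLAGS adds per-mode VM flags, TIMEOUT_SCALEFACTOR stretches the timeout (4x for debug builds), and VARIANT_FLAGS can fan each listed test out into several cases with different flag sets (the default is a single empty variant). A sketch of the two new mechanisms, using the two-variant example from the comment in the patch:

    TIMEOUT_SCALEFACTOR = {'debug': 4, 'release': 1}
    VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]

    def expand_variants(tests):
        # One case per (test, variant) pair, as AddTestsToList now builds.
        return [(t, list(flags)) for flags in VARIANT_FLAGS for t in tests]

    def timeout(base_timeout, mode):
        return base_timeout * TIMEOUT_SCALEFACTOR[mode]

    print(expand_variants(['smi-ops']))
    # [('smi-ops', []), ('smi-ops', ['--always_compact', '--noflush_code'])]
    print(timeout(60, 'debug'))  # 240
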
diff --git a/deps/v8/tools/utils.py b/deps/v8/tools/utils.py
index 505c39874e..8083091b6d 100644
--- a/deps/v8/tools/utils.py
+++ b/deps/v8/tools/utils.py
@@ -59,20 +59,24 @@ def GuessOS():
return 'openbsd'
elif id == 'SunOS':
return 'solaris'
- elif id.find('CYGWIN') >= 0:
- return 'cygwin'
else:
return None
+# This will default to building the 32 bit VM even on machines that are capable
+# of running the 64 bit VM. Use the scons option --arch=x64 to force it to build
+# the 64 bit VM.
def GuessArchitecture():
id = platform.machine()
+ id = id.lower() # Windows 7 capitalizes 'AMD64'.
if id.startswith('arm'):
return 'arm'
- elif (not id) or (not re.match('(x|i[3-6])86', id) is None):
+ elif (not id) or (not re.match('(x|i[3-6])86$', id) is None):
return 'ia32'
elif id == 'i86pc':
return 'ia32'
+ elif id == 'x86_64':
+ return 'ia32'
elif id == 'amd64':
return 'ia32'
else:
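GuessArchitecture now lower-cases platform.machine() (Windows 7 reports 'AMD64'), anchors the x86 regex so 'x86_64' no longer matches the 32-bit pattern by prefix, and then maps x86_64 and amd64 to 'ia32' anyway: as the new comment says, the 32-bit VM is built by default unless scons gets --arch=x64. Condensed:

    import platform
    import re

    def guess_architecture():
        machine = platform.machine().lower()
        if machine.startswith('arm'):
            return 'arm'
        # Anchored: matches i386..i686 and x86, but not x86_64.
        if (not machine) or re.match('(x|i[3-6])86$', machine):
            return 'ia32'
        if machine in ('i86pc', 'x86_64', 'amd64'):
            return 'ia32'  # default to the 32-bit VM on 64-bit hosts too
        return None
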
diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj
index 0ca6a9ddf1..3ebc458421 100644
--- a/deps/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj
@@ -223,14 +223,12 @@
9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
- 9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */; };
9FA38BB71175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
9FA38BB81175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
9FA38BB91175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
9FA38BBA1175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
9FA38BBB1175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
- 9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */; };
9FA38BBE1175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
9FA38BBF1175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
9FA38BC01175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
@@ -248,6 +246,8 @@
C2BD4BE51201661F0046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
+ C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */; };
+ C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081B012251239001EAFE4 /* code-stubs-ia32.cc */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@@ -596,8 +596,6 @@
9FA38BA01175B2D200C4CD55 /* double.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = double.h; sourceTree = "<group>"; };
9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-dtoa.cc"; sourceTree = "<group>"; };
9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fast-dtoa.h"; sourceTree = "<group>"; };
- 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "flow-graph.cc"; sourceTree = "<group>"; };
- 9FA38BA41175B2D200C4CD55 /* flow-graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flow-graph.h"; sourceTree = "<group>"; };
9FA38BA51175B2D200C4CD55 /* full-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "full-codegen.cc"; sourceTree = "<group>"; };
9FA38BA61175B2D200C4CD55 /* full-codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "full-codegen.h"; sourceTree = "<group>"; };
9FA38BA71175B2D200C4CD55 /* jump-target-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-inl.h"; sourceTree = "<group>"; };
@@ -628,6 +626,10 @@
C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fixed-dtoa.h"; sourceTree = "<group>"; };
C2D1E9711212F27B00187A52 /* objects-visiting.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-visiting.cc"; sourceTree = "<group>"; };
C2D1E9721212F27B00187A52 /* objects-visiting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "objects-visiting.h"; sourceTree = "<group>"; };
+ C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-arm.cc"; path = "arm/code-stubs-arm.cc"; sourceTree = "<group>"; };
+ C68081AC1225120B001EAFE4 /* code-stubs-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-arm.h"; path = "arm/code-stubs-arm.h"; sourceTree = "<group>"; };
+ C68081B012251239001EAFE4 /* code-stubs-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-ia32.cc"; path = "ia32/code-stubs-ia32.cc"; sourceTree = "<group>"; };
+ C68081B412251257001EAFE4 /* code-stubs-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-ia32.h"; path = "ia32/code-stubs-ia32.h"; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@@ -716,6 +718,10 @@
897FF0D70E719AB300D62E90 /* C++ */ = {
isa = PBXGroup;
children = (
+ C68081B412251257001EAFE4 /* code-stubs-ia32.h */,
+ C68081B012251239001EAFE4 /* code-stubs-ia32.cc */,
+ C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */,
+ C68081AC1225120B001EAFE4 /* code-stubs-arm.h */,
897FF1750E719B8F00D62E90 /* SConscript */,
897FF0F60E719B8F00D62E90 /* accessors.cc */,
897FF0F70E719B8F00D62E90 /* accessors.h */,
@@ -816,8 +822,6 @@
89471C7F0EB23EE400B6874B /* flag-definitions.h */,
897FF1350E719B8F00D62E90 /* flags.cc */,
897FF1360E719B8F00D62E90 /* flags.h */,
- 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */,
- 9FA38BA41175B2D200C4CD55 /* flow-graph.h */,
8981F5FE1010500F00D1520E /* frame-element.cc */,
8981F5FF1010500F00D1520E /* frame-element.h */,
897FF1370E719B8F00D62E90 /* frames-arm.cc */,
@@ -1298,7 +1302,6 @@
89A88E040E71A65D0043BA31 /* factory.cc in Sources */,
9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
89A88E050E71A65D0043BA31 /* flags.cc in Sources */,
- 9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */,
8981F6001010501900D1520E /* frame-element.cc in Sources */,
89A88E060E71A6600043BA31 /* frames-ia32.cc in Sources */,
89A88E070E71A6610043BA31 /* frames.cc in Sources */,
@@ -1369,6 +1372,7 @@
58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */,
89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
+ C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1422,7 +1426,6 @@
89F23C570E78D5B2006B2466 /* factory.cc in Sources */,
9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
89F23C580E78D5B2006B2466 /* flags.cc in Sources */,
- 9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */,
8981F6011010502800D1520E /* frame-element.cc in Sources */,
89F23C9C0E78D5F1006B2466 /* frames-arm.cc in Sources */,
89F23C5A0E78D5B2006B2466 /* frames.cc in Sources */,
@@ -1494,6 +1497,7 @@
58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */,
89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
+ C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index ef08773494..4629b5d370 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -305,6 +305,14 @@
>
</File>
<File
+ RelativePath="..\..\src\ia32\code-stubs-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\ia32\code-stubs-ia32.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\code.h"
>
</File>
@@ -481,14 +489,6 @@
>
</File>
<File
- RelativePath="..\..\src\flow-graph.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\flow-graph.h"
- >
- </File>
- <File
RelativePath="..\..\src\frame-element.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index aa1e8229e7..4848c9bc2b 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -277,6 +277,14 @@
>
</File>
<File
+ RelativePath="..\..\src\arm\code-stubs-arm.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\arm\code-stubs-arm.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\code.h"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index 33c53940bd..f5cce2198c 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -277,6 +277,14 @@
>
</File>
<File
+ RelativePath="..\..\src\x64\code-stubs-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\code-stubs-x64.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\code.h"
>
</File>