author     Ryan Dahl <ry@tinyclouds.org>   2010-05-21 09:41:50 -0700
committer  Ryan Dahl <ry@tinyclouds.org>   2010-05-21 09:41:50 -0700
commit     2b34363d03e0718c9e9f39982c723b806558c759 (patch)
tree       0388b89e7794e3aa7c9ee2e923570cca56c7def9
parent     9514a4d5476225e8c8310ce5acae2857033bcaaa (diff)
download   node-new-2b34363d03e0718c9e9f39982c723b806558c759.tar.gz
Upgrade V8 to 2.2.11
-rw-r--r--  deps/v8/ChangeLog | 28
-rw-r--r--  deps/v8/SConstruct | 15
-rw-r--r--  deps/v8/benchmarks/README.txt | 8
-rw-r--r--  deps/v8/benchmarks/base.js | 2
-rw-r--r--  deps/v8/benchmarks/raytrace.js | 31
-rw-r--r--  deps/v8/benchmarks/revisions.html | 7
-rw-r--r--  deps/v8/benchmarks/run.html | 6
-rw-r--r--  deps/v8/benchmarks/splay.js | 11
-rw-r--r--  deps/v8/include/v8-debug.h | 48
-rw-r--r--  deps/v8/include/v8-profiler.h | 25
-rw-r--r--  deps/v8/samples/shell.cc | 5
-rwxr-xr-x  deps/v8/src/SConscript | 5
-rw-r--r--  deps/v8/src/api.cc | 55
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 7
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 321
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 29
-rw-r--r--  deps/v8/src/arm/assembler-thumb2-inl.h | 263
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.cc | 1878
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.h | 1036
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 6
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 513
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 16
-rw-r--r--  deps/v8/src/arm/constants-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 11
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/frames-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 170
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 401
-rw-r--r--  deps/v8/src/arm/jump-target-arm.cc | 132
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 5
-rw-r--r--  deps/v8/src/arm/register-allocator-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 52
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm-inl.h | 53
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 122
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 106
-rw-r--r--  deps/v8/src/assembler.cc | 3
-rw-r--r--  deps/v8/src/assembler.h | 8
-rw-r--r--  deps/v8/src/ast-inl.h | 79
-rw-r--r--  deps/v8/src/ast.cc | 14
-rw-r--r--  deps/v8/src/ast.h | 42
-rw-r--r--  deps/v8/src/bootstrapper.cc | 2
-rw-r--r--  deps/v8/src/builtins.cc | 59
-rw-r--r--  deps/v8/src/codegen.h | 3
-rwxr-xr-x  deps/v8/src/compiler.cc | 16
-rw-r--r--  deps/v8/src/cpu-profiler-inl.h | 2
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 47
-rw-r--r--  deps/v8/src/cpu-profiler.h | 16
-rw-r--r--  deps/v8/src/d8.js | 57
-rw-r--r--  deps/v8/src/date.js | 11
-rw-r--r--  deps/v8/src/debug-debugger.js | 31
-rw-r--r--  deps/v8/src/debug.cc | 72
-rw-r--r--  deps/v8/src/debug.h | 24
-rw-r--r--  deps/v8/src/flag-definitions.h | 17
-rw-r--r--  deps/v8/src/full-codegen.cc | 35
-rw-r--r--  deps/v8/src/full-codegen.h | 76
-rw-r--r--  deps/v8/src/globals.h | 18
-rw-r--r--  deps/v8/src/heap.cc | 142
-rw-r--r--  deps/v8/src/heap.h | 78
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 5
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 40
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 2
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 29
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 263
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 21
-rw-r--r--  deps/v8/src/ia32/cpu-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 6
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 1493
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 6
-rw-r--r--  deps/v8/src/ia32/jump-target-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 91
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 3
-rw-r--r--  deps/v8/src/ia32/register-allocator-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 40
-rw-r--r--  deps/v8/src/jump-target-heavy.cc | 63
-rw-r--r--  deps/v8/src/jump-target-heavy.h | 242
-rw-r--r--  deps/v8/src/jump-target-light-inl.h | 14
-rw-r--r--  deps/v8/src/jump-target-light.cc | 83
-rw-r--r--  deps/v8/src/jump-target-light.h | 187
-rw-r--r--  deps/v8/src/jump-target.cc | 64
-rw-r--r--  deps/v8/src/jump-target.h | 218
-rw-r--r--  deps/v8/src/liveedit.cc | 2
-rw-r--r--  deps/v8/src/log.cc | 2
-rw-r--r--  deps/v8/src/macro-assembler.h | 5
-rw-r--r--  deps/v8/src/macros.py | 5
-rw-r--r--  deps/v8/src/mark-compact.cc | 8
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/constants-mips.cc | 5
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/fast-codegen-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/frames-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/jump-target-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/register-allocator-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/virtual-frame-mips.cc | 3
-rw-r--r--  deps/v8/src/objects-inl.h | 12
-rw-r--r--  deps/v8/src/objects.cc | 61
-rw-r--r--  deps/v8/src/objects.h | 34
-rw-r--r--  deps/v8/src/parser.cc | 5
-rw-r--r--  deps/v8/src/platform-solaris.cc | 33
-rw-r--r--  deps/v8/src/platform.h | 3
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 17
-rw-r--r--  deps/v8/src/profile-generator.cc | 287
-rw-r--r--  deps/v8/src/profile-generator.h | 62
-rw-r--r--  deps/v8/src/register-allocator.cc | 7
-rw-r--r--  deps/v8/src/runtime.js | 15
-rw-r--r--  deps/v8/src/serialize.cc | 484
-rw-r--r--  deps/v8/src/serialize.h | 129
-rw-r--r--  deps/v8/src/string.js | 193
-rw-r--r--  deps/v8/src/third_party/dtoa/dtoa.c | 4
-rw-r--r--  deps/v8/src/v8natives.js | 21
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/virtual-frame-heavy-inl.h | 40
-rw-r--r--  deps/v8/src/virtual-frame-light-inl.h | 93
-rw-r--r--  deps/v8/src/virtual-frame-light.cc | 3
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 138
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 11
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 5
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 552
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 29
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 10
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 5
-rw-r--r--  deps/v8/src/x64/fast-codegen-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/frames-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 1510
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 246
-rw-r--r--  deps/v8/src/x64/jump-target-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 24
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 10
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 104
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 3
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 13
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 144
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 69
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 25
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc | 64
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 214
-rw-r--r--  deps/v8/test/mjsunit/arguments-load-across-eval.js | 86
-rw-r--r--  deps/v8/test/mjsunit/array-concat.js | 79
-rw-r--r--  deps/v8/test/mjsunit/array-pop.js | 28
-rw-r--r--  deps/v8/test/mjsunit/array-shift.js | 37
-rw-r--r--  deps/v8/test/mjsunit/array-slice.js | 47
-rw-r--r--  deps/v8/test/mjsunit/array-splice.js | 57
-rw-r--r--  deps/v8/test/mjsunit/array-unshift.js | 79
-rw-r--r--  deps/v8/test/mjsunit/compiler/assignment.js | 12
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.js | 2
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 16
-rw-r--r--  deps/v8/test/mjsunit/property-load-across-eval.js | 2
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 1
-rw-r--r--  deps/v8/tools/gc-nvp-trace-processor.py | 282
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 44
-rw-r--r--  deps/v8/tools/v8.xcodeproj/project.pbxproj | 8
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 8
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 12
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj | 8
176 files changed, 8628 insertions, 6128 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index c2d4e46a63..33b6d142f7 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,29 @@
+2010-05-21: Version 2.2.11
+
+ Fix crash bug in liveedit on 64 bit.
+
+ Use 'full compiler' when debugging is active. This should increase
+ the density of possible break points, making single step more fine
+ grained. This will only take effect for functions compiled after
+ debugging has been started, so recompilation of all functions is
+ required to get the full effect. IA32 and x64 only for now.
+
+ Misc. fixes to the Solaris build.
+
+ Add new flags --print-cumulative-gc-stat and --trace-gc-nvp.
+
+ Add filtering of CPU profiles by security context.
+
+ Fix crash bug on ARM when running without VFP2 or VFP3.
+
+ Incremental performance improvements in all backends.
+
+
+2010-05-17: Version 2.2.10
+
+ Performance improvements in the x64 and ARM backends.
+
+
2010-05-10: Version 2.2.9
Allow Object.create to be called with a function (issue 697).
@@ -6,7 +32,7 @@
non date string (issue 696).
Allow unaligned memory accesses on ARM targets that support it (by
- Subrato K De of CodeAurora <subratokde@codeaurora.org>).
+ Subrato K De of CodeAurora <subratokde@codeaurora.org>).
C++ API for retrieving JavaScript stack trace information.
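
A minimal sketch of how an embedder might turn on the new GC statistics flags mentioned in the 2.2.11 entry above. The flag names come from the ChangeLog; the use of V8::SetFlagsFromString and the call site are illustrative assumptions, not part of this commit:

    // Sketch only: enable the new GC tracing flags added in 2.2.11.
    #include <v8.h>
    #include <cstring>

    static void EnableGcTracing() {
      // --trace-gc-nvp prints GC events as name=value pairs (consumable by
      // tools/gc-nvp-trace-processor.py); --print-cumulative-gc-stat prints
      // aggregate GC statistics at exit.
      const char* flags = "--trace-gc-nvp --print-cumulative-gc-stat";
      v8::V8::SetFlagsFromString(flags, static_cast<int>(std::strlen(flags)));
    }
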
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 597d033459..7cf866ca55 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -210,12 +210,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'],
'LINKFLAGS': ['-m32']
},
- 'armvariant:thumb2': {
- 'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
- },
- 'armvariant:arm': {
- 'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
- },
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'simulator:none': {
@@ -764,11 +758,6 @@ SIMPLE_OPTIONS = {
'default': 'hidden',
'help': 'shared library symbol visibility'
},
- 'armvariant': {
- 'values': ['arm', 'thumb2', 'none'],
- 'default': 'none',
- 'help': 'generate thumb2 instructions instead of arm instructions (default)'
- },
'pgo': {
'values': ['off', 'instrument', 'optimize'],
'default': 'off',
@@ -962,10 +951,6 @@ def PostprocessOptions(options, os):
if 'msvcltcg' in ARGUMENTS:
print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
options['msvcltcg'] = 'on'
- if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
- options['armvariant'] = 'arm'
- if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
- options['armvariant'] = 'none'
if options['arch'] == 'mips':
if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
# Print a warning if native regexp is specified for mips
diff --git a/deps/v8/benchmarks/README.txt b/deps/v8/benchmarks/README.txt
index eb759cc92c..8e08159da8 100644
--- a/deps/v8/benchmarks/README.txt
+++ b/deps/v8/benchmarks/README.txt
@@ -61,3 +61,11 @@ Removed duplicate line in random seed code, and changed the name of
the Object.prototype.inherits function in the DeltaBlue benchmark to
inheritsFrom to avoid name clashes when running in Chromium with
extensions enabled.
+
+
+Changes from Version 5 to Version 6
+===================================
+
+Removed dead code from the RayTrace benchmark and changed the Splay
+benchmark to avoid converting the same numeric key to a string over
+and over again.
diff --git a/deps/v8/benchmarks/base.js b/deps/v8/benchmarks/base.js
index 67cddd205e..ce308419ed 100644
--- a/deps/v8/benchmarks/base.js
+++ b/deps/v8/benchmarks/base.js
@@ -78,7 +78,7 @@ BenchmarkSuite.suites = [];
// Scores are not comparable across versions. Bump the version if
// you're making changes that will affect that scores, e.g. if you add
// a new benchmark or change an existing one.
-BenchmarkSuite.version = '5';
+BenchmarkSuite.version = '6 (candidate)';
// To make the benchmark results predictable, we replace Math.random
diff --git a/deps/v8/benchmarks/raytrace.js b/deps/v8/benchmarks/raytrace.js
index c68b0383a3..da4d5924aa 100644
--- a/deps/v8/benchmarks/raytrace.js
+++ b/deps/v8/benchmarks/raytrace.js
@@ -205,12 +205,6 @@ Flog.RayTracer.Light.prototype = {
this.intensity = (intensity ? intensity : 10.0);
},
- getIntensity: function(distance){
- if(distance >= intensity) return 0;
-
- return Math.pow((intensity - distance) / strength, 0.2);
- },
-
toString : function () {
return 'Light [' + this.position.x + ',' + this.position.y + ',' + this.position.z + ']';
}
@@ -420,31 +414,6 @@ if(typeof(Flog) == 'undefined') var Flog = {};
if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {};
if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {};
-Flog.RayTracer.Shape.BaseShape = Class.create();
-
-Flog.RayTracer.Shape.BaseShape.prototype = {
- position: null,
- material: null,
-
- initialize : function() {
- this.position = new Vector(0,0,0);
- this.material = new Flog.RayTracer.Material.SolidMaterial(
- new Flog.RayTracer.Color(1,0,1),
- 0,
- 0,
- 0
- );
- },
-
- toString : function () {
- return 'Material [gloss=' + this.gloss + ', transparency=' + this.transparency + ', hasTexture=' + this.hasTexture +']';
- }
-}
-/* Fake a Flog.* namespace */
-if(typeof(Flog) == 'undefined') var Flog = {};
-if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {};
-if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {};
-
Flog.RayTracer.Shape.Sphere = Class.create();
Flog.RayTracer.Shape.Sphere.prototype = {
diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html
index 99d7be42b9..b03aa126d6 100644
--- a/deps/v8/benchmarks/revisions.html
+++ b/deps/v8/benchmarks/revisions.html
@@ -20,6 +20,13 @@ the benchmark suite.
</p>
+<div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.html">link</a>)</h3></div>
+
+<p>Removed dead code from the RayTrace benchmark and changed the Splay
+benchmark to avoid converting the same numeric key to a string over
+and over again.
+</p>
+
<div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.html">link</a>)</h3></div>
<p>Removed duplicate line in random seed code, and changed the name of
diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html
index ef2c186412..30036b7843 100644
--- a/deps/v8/benchmarks/run.html
+++ b/deps/v8/benchmarks/run.html
@@ -111,12 +111,12 @@ higher scores means better performance: <em>Bigger is better!</em>
<li><b>Richards</b><br>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
<li><b>DeltaBlue</b><br>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
<li><b>Crypto</b><br>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
-<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
-<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
+<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>904 lines</i>).</li>
+<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4684 lines</i>).</li>
<li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>).
</li>
-<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
+<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>379 lines</i>).</li>
</ul>
<p>
diff --git a/deps/v8/benchmarks/splay.js b/deps/v8/benchmarks/splay.js
index 53fc72793e..d8c8f04271 100644
--- a/deps/v8/benchmarks/splay.js
+++ b/deps/v8/benchmarks/splay.js
@@ -46,16 +46,16 @@ var kSplayTreePayloadDepth = 5;
var splayTree = null;
-function GeneratePayloadTree(depth, key) {
+function GeneratePayloadTree(depth, tag) {
if (depth == 0) {
return {
array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
- string : 'String for key ' + key + ' in leaf node'
+ string : 'String for key ' + tag + ' in leaf node'
};
} else {
return {
- left: GeneratePayloadTree(depth - 1, key),
- right: GeneratePayloadTree(depth - 1, key)
+ left: GeneratePayloadTree(depth - 1, tag),
+ right: GeneratePayloadTree(depth - 1, tag)
};
}
}
@@ -74,7 +74,8 @@ function InsertNewNode() {
do {
key = GenerateKey();
} while (splayTree.find(key) != null);
- splayTree.insert(key, GeneratePayloadTree(kSplayTreePayloadDepth, key));
+ var payload = GeneratePayloadTree(kSplayTreePayloadDepth, String(key));
+ splayTree.insert(key, payload);
return key;
}
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index f7b4fa12e3..c53b63462a 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -144,6 +144,39 @@ class EXPORT Debug {
/**
+ * An event details object passed to the debug event listener.
+ */
+ class EventDetails {
+ public:
+ /**
+ * Event type.
+ */
+ virtual DebugEvent GetEvent() const = 0;
+
+ /**
+ * Access to execution state and event data of the debug event. Don't store
+ * these cross callbacks as their content becomes invalid.
+ */
+ virtual Handle<Object> GetExecutionState() const = 0;
+ virtual Handle<Object> GetEventData() const = 0;
+
+ /**
+ * Get the context active when the debug event happened. Note this is not
+ * the current active context as the JavaScript part of the debugger is
+ * running in its own context which is entered at this point.
+ */
+ virtual Handle<Context> GetEventContext() const = 0;
+
+ /**
+ * Client data passed with the corresponding callback when it was registered.
+ */
+ virtual Handle<Value> GetCallbackData() const = 0;
+
+ virtual ~EventDetails() {}
+ };
+
+
+ /**
* Debug event callback function.
*
* \param event the type of the debug event that triggered the callback
@@ -157,6 +190,15 @@ class EXPORT Debug {
Handle<Object> event_data,
Handle<Value> data);
+ /**
+ * Debug event callback function.
+ *
+ * \param event_details object providing information about the debug event
+ *
+ * An EventCallback2 does not take possession of the event data,
+ * and must not rely on the data persisting after the handler returns.
+ */
+ typedef void (*EventCallback2)(const EventDetails& event_details);
/**
* Debug message callback function.
@@ -165,7 +207,7 @@ class EXPORT Debug {
* \param length length of the message
* \param client_data the data value passed when registering the message handler
- * A MessageHandler does not take posession of the message string,
+ * A MessageHandler does not take possession of the message string,
* and must not rely on the data persisting after the handler returns.
*
* This message handler is deprecated. Use MessageHandler2 instead.
@@ -178,7 +220,7 @@ class EXPORT Debug {
*
* \param message the debug message handler message object
- * A MessageHandler does not take posession of the message data,
+ * A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler2)(const Message& message);
@@ -196,6 +238,8 @@ class EXPORT Debug {
// Set a C debug event listener.
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
+ static bool SetDebugEventListener2(EventCallback2 that,
+ Handle<Value> data = Handle<Value>());
// Set a JavaScript debug event listener.
static bool SetDebugEventListener(v8::Handle<v8::Object> that,
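
To make the new EventDetails-based listener concrete, here is a minimal sketch of registering an EventCallback2 through SetDebugEventListener2. The listener body, and the choice to react only to break events, are illustrative assumptions rather than part of this commit:

    // Sketch only: using the EventDetails/EventCallback2 API added above.
    #include <v8.h>
    #include <v8-debug.h>

    static void DebugListener(const v8::Debug::EventDetails& details) {
      // React only to break events; other DebugEvent values are ignored here.
      if (details.GetEvent() != v8::Break) return;
      // These handles are only valid for the duration of the callback and
      // must not be stored across calls (see the comment in v8-debug.h).
      v8::Handle<v8::Object> exec_state = details.GetExecutionState();
      v8::Handle<v8::Object> event_data = details.GetEventData();
      v8::Handle<v8::Value> data = details.GetCallbackData();
      // ... inspect exec_state / event_data / data here ...
    }

    static void InstallListener() {
      // The optional second argument is handed back to the listener through
      // EventDetails::GetCallbackData() on every event.
      v8::Debug::SetDebugEventListener2(DebugListener);
    }
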
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index f1b8ffbbdc..bb4107221c 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -140,22 +140,37 @@ class V8EXPORT CpuProfile {
class V8EXPORT CpuProfiler {
public:
/**
+ * A note on security tokens usage. As scripts from different
+ * origins can run inside a single V8 instance, it is possible to
+ * have functions from different security contexts intermixed in a
+ * single CPU profile. To avoid exposing function names belonging to
+ * other contexts, filtering by security token is performed while
+ * obtaining profiling results.
+ */
+
+ /**
* Returns the number of profiles collected (doesn't include
* profiles that are being collected at the moment of call.)
*/
static int GetProfilesCount();
/** Returns a profile by index. */
- static const CpuProfile* GetProfile(int index);
+ static const CpuProfile* GetProfile(
+ int index,
+ Handle<Value> security_token = Handle<Value>());
/** Returns a profile by uid. */
- static const CpuProfile* FindProfile(unsigned uid);
+ static const CpuProfile* FindProfile(
+ unsigned uid,
+ Handle<Value> security_token = Handle<Value>());
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
* once. Attempts to start collecting several profiles with the same
- * title are silently ignored.
+ * title are silently ignored. While collecting a profile, functions
+ * from all security contexts are included in it. The token-based
+ * filtering is only performed when querying for a profile.
*/
static void StartProfiling(Handle<String> title);
@@ -163,7 +178,9 @@ class V8EXPORT CpuProfiler {
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
- static const CpuProfile* StopProfiling(Handle<String> title);
+ static const CpuProfile* StopProfiling(
+ Handle<String> title,
+ Handle<Value> security_token = Handle<Value>());
};
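
A short sketch of the security-token flow described in the comment above: functions from all contexts are recorded while a profile is collected, and token-based filtering is applied only when the profile is retrieved. The profile title and the way the token is obtained are assumptions for illustration:

    // Sketch only: collecting a CPU profile and retrieving it filtered by a
    // security token, using the signatures added in this commit.
    #include <v8.h>
    #include <v8-profiler.h>

    const v8::CpuProfile* ProfileWithToken(v8::Handle<v8::Value> token) {
      v8::HandleScope scope;
      v8::Handle<v8::String> title = v8::String::New("page-load");
      // All security contexts are recorded while the profile is collected...
      v8::CpuProfiler::StartProfiling(title);
      // ... run the JavaScript to be profiled ...
      // ...and token-based filtering happens only on retrieval.
      const v8::CpuProfile* profile =
          v8::CpuProfiler::StopProfiling(title, token);
      // Previously finished profiles can be re-queried with the same token.
      if (v8::CpuProfiler::GetProfilesCount() > 0) {
        profile = v8::CpuProfiler::GetProfile(0, token);
      }
      return profile;
    }
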
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 27ed293e7e..1a13f5f80b 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -299,5 +299,10 @@ void ReportException(v8::TryCatch* try_catch) {
printf("^");
}
printf("\n");
+ v8::String::Utf8Value stack_trace(try_catch->StackTrace());
+ if (stack_trace.length() > 0) {
+ const char* stack_trace_string = ToCString(stack_trace);
+ printf("%s\n", stack_trace_string);
+ }
}
}
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index b68f6d1d23..8466a0c557 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -136,13 +136,8 @@ SOURCES = {
arm/register-allocator-arm.cc
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
- """),
- 'armvariant:arm': Split("""
arm/assembler-arm.cc
"""),
- 'armvariant:thumb2': Split("""
- arm/assembler-thumb2.cc
- """),
'arch:mips': Split("""
fast-codegen.cc
mips/assembler-mips.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index a4c38b72c2..cf940c6e00 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -48,7 +48,7 @@
#define LOG_API(expr) LOG(ApiEntryCall(expr))
-#ifdef ENABLE_HEAP_PROTECTION
+#ifdef ENABLE_VMSTATE_TRACKING
#define ENTER_V8 i::VMState __state__(i::OTHER)
#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
#else
@@ -3992,10 +3992,40 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
// --- D e b u g S u p p o r t ---
#ifdef ENABLE_DEBUGGER_SUPPORT
+
+static v8::Debug::EventCallback event_callback = NULL;
+
+static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
+ if (event_callback) {
+ event_callback(event_details.GetEvent(),
+ event_details.GetExecutionState(),
+ event_details.GetEventData(),
+ event_details.GetCallbackData());
+ }
+}
+
+
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
EnsureInitialized("v8::Debug::SetDebugEventListener()");
ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
ENTER_V8;
+
+ event_callback = that;
+
+ HandleScope scope;
+ i::Handle<i::Object> proxy = i::Factory::undefined_value();
+ if (that != NULL) {
+ proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
+ }
+ i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+ return true;
+}
+
+
+bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
+ EnsureInitialized("v8::Debug::SetDebugEventListener2()");
+ ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false);
+ ENTER_V8;
HandleScope scope;
i::Handle<i::Object> proxy = i::Factory::undefined_value();
if (that != NULL) {
@@ -4250,15 +4280,23 @@ int CpuProfiler::GetProfilesCount() {
}
-const CpuProfile* CpuProfiler::GetProfile(int index) {
+const CpuProfile* CpuProfiler::GetProfile(int index,
+ Handle<Value> security_token) {
IsDeadCheck("v8::CpuProfiler::GetProfile");
- return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::GetProfile(index));
+ return reinterpret_cast<const CpuProfile*>(
+ i::CpuProfiler::GetProfile(
+ security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+ index));
}
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
+ Handle<Value> security_token) {
IsDeadCheck("v8::CpuProfiler::FindProfile");
- return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::FindProfile(uid));
+ return reinterpret_cast<const CpuProfile*>(
+ i::CpuProfiler::FindProfile(
+ security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+ uid));
}
@@ -4268,10 +4306,13 @@ void CpuProfiler::StartProfiling(Handle<String> title) {
}
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
+ Handle<Value> security_token) {
IsDeadCheck("v8::CpuProfiler::StopProfiling");
return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
+ i::CpuProfiler::StopProfiling(
+ security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+ *Utils::OpenHandle(*title)));
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3f0854e333..a5c5bd1440 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -169,13 +169,6 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
}
-Operand::Operand(const char* s) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index f1f59ced7f..dba62e62c6 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -36,6 +36,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "arm/assembler-arm-inl.h"
#include "serialize.h"
@@ -106,6 +108,15 @@ void CpuFeatures::Probe() {
const int RelocInfo::kApplyMask = 0;
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on ARM means that it is a movw/movt instruction. We don't
+ // generate those yet.
+ return false;
+}
+
+
+
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -268,6 +279,20 @@ const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+// A mask for the Rd register for push, pop, ldr, str instructions.
+const Instr kRdMask = 0x0000f000;
+static const int kRdShift = 12;
+static const Instr kLdrRegFpOffsetPattern =
+ al | B26 | L | Offset | fp.code() * B16;
+static const Instr kStrRegFpOffsetPattern =
+ al | B26 | Offset | fp.code() * B16;
+static const Instr kLdrRegFpNegOffsetPattern =
+ al | B26 | L | NegOffset | fp.code() * B16;
+static const Instr kStrRegFpNegOffsetPattern =
+ al | B26 | NegOffset | fp.code() * B16;
+static const Instr kLdrStrInstrTypeMask = 0xffff0000;
+static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+static const Instr kLdrStrOffsetMask = 0x00000fff;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
@@ -395,6 +420,43 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
}
+Register Assembler::GetRd(Instr instr) {
+ Register reg;
+ reg.code_ = ((instr & kRdMask) >> kRdShift);
+ return reg;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+ return ((instr & ~kRdMask) == kPushRegPattern);
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+ return ((instr & ~kRdMask) == kPopRegPattern);
+}
+
+
+bool Assembler::IsStrRegFpOffset(Instr instr) {
+ return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLdrRegFpOffset(Instr instr) {
+ return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsStrRegFpNegOffset(Instr instr) {
+ return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
+ return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
+}
+
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -887,15 +949,12 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
// str(src, MemOperand(sp, 4, NegPreIndex), al);
// add(sp, sp, Operand(kPointerSize));
// Both instructions can be eliminated.
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
}
}
@@ -1086,20 +1145,170 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
}
addrmod2(cond | B26 | L, dst, src);
- // Eliminate pattern: push(r), pop(r)
- // str(r, MemOperand(sp, 4, NegPreIndex), al)
- // ldr(r, MemOperand(sp, 4, PostIndex), al)
- // Both instructions can be eliminated.
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ // Eliminate pattern: push(ry), pop(rx)
+ // str(ry, MemOperand(sp, 4, NegPreIndex), al)
+ // ldr(rx, MemOperand(sp, 4, PostIndex), al)
+ // Both instructions can be eliminated if ry = rx.
+ // If ry != rx, a register copy from ry to rx is inserted
+ // after eliminating the push and the pop instructions.
+ Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
+
+ if (can_peephole_optimize(2) &&
+ IsPush(push_instr) &&
+ IsPop(pop_instr)) {
+ if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+ // For consecutive push and pop on different registers,
+ // we delete both the push & pop and insert a register move.
+ // push ry, pop rx --> mov rx, ry
+ Register reg_pushed, reg_popped;
+ reg_pushed = GetRd(push_instr);
+ reg_popped = GetRd(pop_instr);
+ pc_ -= 2 * kInstrSize;
+ // Insert a mov instruction, which is better than a pair of push & pop
+ mov(reg_popped, reg_pushed);
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
+ }
+ } else {
+ // For consecutive push and pop on the same register,
+ // both the push and the pop can be deleted.
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
+ }
+ }
+
+ if (can_peephole_optimize(2)) {
+ Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
+
+ if ((IsStrRegFpOffset(str_instr) &&
+ IsLdrRegFpOffset(ldr_instr)) ||
+ (IsStrRegFpNegOffset(str_instr) &&
+ IsLdrRegFpNegOffset(ldr_instr))) {
+ if ((ldr_instr & kLdrStrInstrArgumentMask) ==
+ (str_instr & kLdrStrInstrArgumentMask)) {
+ // Pattern: Ldr/str same fp+offset, same register.
+ //
+ // The following:
+ // str rx, [fp, #-12]
+ // ldr rx, [fp, #-12]
+ //
+ // Becomes:
+ // str rx, [fp, #-12]
+
+ pc_ -= 1 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
+ }
+ } else if ((ldr_instr & kLdrStrOffsetMask) ==
+ (str_instr & kLdrStrOffsetMask)) {
+ // Pattern: Ldr/str same fp+offset, different register.
+ //
+ // The following:
+ // str rx, [fp, #-12]
+ // ldr ry, [fp, #-12]
+ //
+ // Becomes:
+ // str rx, [fp, #-12]
+ // mov ry, rx
+
+ Register reg_stored, reg_loaded;
+ reg_stored = GetRd(str_instr);
+ reg_loaded = GetRd(ldr_instr);
+ pc_ -= 1 * kInstrSize;
+ // Insert a mov instruction, which is better than ldr.
+ mov(reg_loaded, reg_stored);
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
+ }
+ }
+ }
+ }
+
+ if (can_peephole_optimize(3)) {
+ Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
+ Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
+ if (IsPush(mem_write_instr) &&
+ IsPop(mem_read_instr)) {
+ if ((IsLdrRegFpOffset(ldr_instr) ||
+ IsLdrRegFpNegOffset(ldr_instr))) {
+ if ((mem_write_instr & kRdMask) ==
+ (mem_read_instr & kRdMask)) {
+ // Pattern: push & pop from/to same register,
+ // with a fp+offset ldr in between
+ //
+ // The following:
+ // str rx, [sp, #-4]!
+ // ldr rz, [fp, #-24]
+ // ldr rx, [sp], #+4
+ //
+ // Becomes:
+ // if(rx == rz)
+ // delete all
+ // else
+ // ldr rz, [fp, #-24]
+
+ if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+ pc_ -= 3 * kInstrSize;
+ } else {
+ pc_ -= 3 * kInstrSize;
+ // Reinsert back the ldr rz.
+ emit(ldr_instr);
+ }
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+ }
+ } else {
+ // Pattern: push & pop from/to different registers
+ // with a fp+offset ldr in between
+ //
+ // The following:
+ // str rx, [sp, #-4]!
+ // ldr rz, [fp, #-24]
+ // ldr ry, [sp], #+4
+ //
+ // Becomes:
+ // if(ry == rz)
+ // mov ry, rx;
+ // else if(rx != rz)
+ // ldr rz, [fp, #-24]
+ // mov ry, rx
+ // else if((ry != rz) || (rx == rz)) becomes:
+ // mov ry, rx
+ // ldr rz, [fp, #-24]
+
+ Register reg_pushed, reg_popped;
+ if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+ reg_pushed = GetRd(mem_write_instr);
+ reg_popped = GetRd(mem_read_instr);
+ pc_ -= 3 * kInstrSize;
+ mov(reg_popped, reg_pushed);
+ } else if ((mem_write_instr & kRdMask)
+ != (ldr_instr & kRdMask)) {
+ reg_pushed = GetRd(mem_write_instr);
+ reg_popped = GetRd(mem_read_instr);
+ pc_ -= 3 * kInstrSize;
+ emit(ldr_instr);
+ mov(reg_popped, reg_pushed);
+ } else if (((mem_read_instr & kRdMask)
+ != (ldr_instr & kRdMask)) ||
+ ((mem_write_instr & kRdMask)
+ == (ldr_instr & kRdMask)) ) {
+ reg_pushed = GetRd(mem_write_instr);
+ reg_popped = GetRd(mem_read_instr);
+ pc_ -= 3 * kInstrSize;
+ mov(reg_popped, reg_pushed);
+ emit(ldr_instr);
+ }
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
+ }
+ }
+ }
}
}
}
@@ -1111,16 +1320,13 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
// Eliminate pattern: pop(), push(r)
// add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
// -> str r, [sp, 0], al
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
}
}
@@ -1162,12 +1368,18 @@ void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B4, dst, src);
#else
- ldr(dst, src, cond);
+ // Generate two ldr instructions if ldrd is not available.
MemOperand src1(src);
src1.set_offset(src1.offset() + 4);
Register dst1(dst);
- dst1.code_ = dst1.code_ + 1;
- ldr(dst1, src1, cond);
+ dst1.set_code(dst1.code() + 1);
+ if (dst.is(src.rn())) {
+ ldr(dst1, src1, cond);
+ ldr(dst, src, cond);
+ } else {
+ ldr(dst, src, cond);
+ ldr(dst1, src1, cond);
+ }
#endif
}
@@ -1177,11 +1389,12 @@ void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
#else
- str(src, dst, cond);
+ // Generate two str instructions if strd is not available.
MemOperand dst1(dst);
dst1.set_offset(dst1.offset() + 4);
Register src1(src);
- src1.code_ = src1.code_ + 1;
+ src1.set_code(src1.code() + 1);
+ str(src, dst, cond);
str(src1, dst1, cond);
#endif
}
@@ -1216,26 +1429,6 @@ void Assembler::stm(BlockAddrMode am,
}
-// Semaphore instructions.
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
- ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
- ASSERT(!dst.is(base) && !src.is(base));
- emit(cond | P | base.code()*B16 | dst.code()*B12 |
- B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
- Register src,
- Register base,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
- ASSERT(!dst.is(base) && !src.is(base));
- emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
- B7 | B4 | src.code());
-}
-
-
// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#ifndef __arm__
@@ -1779,34 +1972,6 @@ void Assembler::nop(int type) {
}
-void Assembler::lea(Register dst,
- const MemOperand& x,
- SBit s,
- Condition cond) {
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- if ((am & P) == 0) // post indexing
- mov(dst, Operand(x.rn_), s, cond);
- else if ((am & U) == 0) // negative indexing
- sub(dst, x.rn_, Operand(x.offset_), s, cond);
- else
- add(dst, x.rn_, Operand(x.offset_), s, cond);
- } else {
- // Register offset (shift_imm_ and shift_op_ are 0) or scaled
- // register offset the constructors make sure than both shift_imm_
- // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- if ((am & P) == 0) // post indexing
- mov(dst, Operand(x.rn_), s, cond);
- else if ((am & U) == 0) // negative indexing
- sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
- else
- add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
- }
-}
-
-
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
@@ -2062,3 +2227,5 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 61b84d434f..5a5d64b7b6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -80,6 +80,11 @@ struct Register {
return 1 << code_;
}
+ void set_code(int code) {
+ code_ = code;
+ ASSERT(is_valid());
+ }
+
// Unfortunately we can't make this private in a struct.
int code_;
};
@@ -458,7 +463,8 @@ class MemOperand BASE_EMBEDDED {
return offset_;
}
- Register rm() const {return rm_;}
+ Register rn() const { return rn_; }
+ Register rm() const { return rm_; }
private:
Register rn_; // base
@@ -774,10 +780,6 @@ class Assembler : public Malloced {
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
- // Semaphore instructions
- void swp(Register dst, Register src, Register base, Condition cond = al);
- void swpb(Register dst, Register src, Register base, Condition cond = al);
-
// Exception-generating instructions and debugging support
void stop(const char* msg);
@@ -924,10 +926,6 @@ class Assembler : public Malloced {
add(sp, sp, Operand(kPointerSize));
}
- // Load effective address of memory operand x into register dst
- void lea(Register dst, const MemOperand& x,
- SBit s = LeaveCC, Condition cond = al);
-
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
@@ -976,6 +974,12 @@ class Assembler : public Malloced {
int current_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
+ bool can_peephole_optimize(int instructions) {
+ if (!FLAG_peephole_optimization) return false;
+ if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
+ return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
+ }
+
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
@@ -987,6 +991,13 @@ class Assembler : public Malloced {
static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+ static Register GetRd(Instr instr);
+ static bool IsPush(Instr instr);
+ static bool IsPop(Instr instr);
+ static bool IsStrRegFpOffset(Instr instr);
+ static bool IsLdrRegFpOffset(Instr instr);
+ static bool IsStrRegFpNegOffset(Instr instr);
+ static bool IsLdrRegFpNegOffset(Instr instr);
protected:
diff --git a/deps/v8/src/arm/assembler-thumb2-inl.h b/deps/v8/src/arm/assembler-thumb2-inl.h
deleted file mode 100644
index 9e0fc2f731..0000000000
--- a/deps/v8/src/arm/assembler-thumb2-inl.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
-#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
-
-#include "arm/assembler-thumb2.h"
-#include "cpu.h"
-
-
-namespace v8 {
-namespace internal {
-
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
-
-void RelocInfo::apply(intptr_t delta) {
- if (RelocInfo::IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // relocate entry
- }
- // We do not use pc relative addressing on ARM, so there is
- // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- // On ARM a "call instruction" is actually two instructions.
- // mov lr, pc
- // ldr pc, [pc, #XXX]
- return (Assembler::instr_at(pc_) == kMovLrPc)
- && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
- == kLdrPCPattern);
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const char* s) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = LSL;
- shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid() &&
- rs_.is(no_reg) &&
- shift_op_ == LSL &&
- shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
- if (pc_offset() >= next_buffer_check_) {
- CheckConstPool(false, true);
- }
-}
-
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_address_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
- // Verify that the instruction to patch is a
- // ldr<cond> <Rd>, [pc +/- offset_12].
- ASSERT((instr & 0x0f7f0000) == 0x051f0000);
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_address_address_at(pc));
-}
-
-
-void Assembler::set_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_address_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to flush the instruction cache
- // after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction was actually patched by the assignment
- // above; the target address is not part of an instruction, it is patched in
- // the constant pool and is read via a data access; the instruction accessing
- // this address in the constant pool remains unchanged.
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_
diff --git a/deps/v8/src/arm/assembler-thumb2.cc b/deps/v8/src/arm/assembler-thumb2.cc
deleted file mode 100644
index e31c429175..0000000000
--- a/deps/v8/src/arm/assembler-thumb2.cc
+++ /dev/null
@@ -1,1878 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#include "arm/assembler-thumb2-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Safe default is no features.
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::enabled_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-void CpuFeatures::Probe() {
- // If the compiler is allowed to use vfp then we can use vfp too in our
- // code generation.
-#if !defined(__arm__)
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
- if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3;
- }
- // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
- if (FLAG_enable_armv7) {
- supported_ |= 1u << ARMv7;
- }
-#else
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- if (OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if
- // runtime detection of VFP returns true.
- supported_ |= 1u << VFP3;
- found_by_runtime_probing_ |= 1u << VFP3;
- }
-
- if (OS::ArmCpuHasFeature(ARMv7)) {
- supported_ |= 1u << ARMv7;
- found_by_runtime_probing_ |= 1u << ARMv7;
- }
-#endif
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and CRegister
-
-Register no_reg = { -1 };
-
-Register r0 = { 0 };
-Register r1 = { 1 };
-Register r2 = { 2 };
-Register r3 = { 3 };
-Register r4 = { 4 };
-Register r5 = { 5 };
-Register r6 = { 6 };
-Register r7 = { 7 };
-Register r8 = { 8 }; // Used as context register.
-Register r9 = { 9 };
-Register r10 = { 10 }; // Used as roots register.
-Register fp = { 11 };
-Register ip = { 12 };
-Register sp = { 13 };
-Register lr = { 14 };
-Register pc = { 15 };
-
-
-CRegister no_creg = { -1 };
-
-CRegister cr0 = { 0 };
-CRegister cr1 = { 1 };
-CRegister cr2 = { 2 };
-CRegister cr3 = { 3 };
-CRegister cr4 = { 4 };
-CRegister cr5 = { 5 };
-CRegister cr6 = { 6 };
-CRegister cr7 = { 7 };
-CRegister cr8 = { 8 };
-CRegister cr9 = { 9 };
-CRegister cr10 = { 10 };
-CRegister cr11 = { 11 };
-CRegister cr12 = { 12 };
-CRegister cr13 = { 13 };
-CRegister cr14 = { 14 };
-CRegister cr15 = { 15 };
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0 = { 0 };
-SwVfpRegister s1 = { 1 };
-SwVfpRegister s2 = { 2 };
-SwVfpRegister s3 = { 3 };
-SwVfpRegister s4 = { 4 };
-SwVfpRegister s5 = { 5 };
-SwVfpRegister s6 = { 6 };
-SwVfpRegister s7 = { 7 };
-SwVfpRegister s8 = { 8 };
-SwVfpRegister s9 = { 9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0 = { 0 };
-DwVfpRegister d1 = { 1 };
-DwVfpRegister d2 = { 2 };
-DwVfpRegister d3 = { 3 };
-DwVfpRegister d4 = { 4 };
-DwVfpRegister d5 = { 5 };
-DwVfpRegister d6 = { 6 };
-DwVfpRegister d7 = { 7 };
-DwVfpRegister d8 = { 8 };
-DwVfpRegister d9 = { 9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-thumb2-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
-  // Verify that all Objects referred to by the code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
- ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
- // encoded as ROR with shift_imm == 0
- ASSERT(shift_imm == 0);
- shift_op_ = ROR;
- shift_imm_ = 0;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
- ASSERT(shift_op != RRX);
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
- rn_ = rn;
- rm_ = no_reg;
- offset_ = offset;
- am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
- rn_ = rn;
- rm_ = rm;
- shift_op_ = LSL;
- shift_imm_ = 0;
- am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am) {
- ASSERT(is_uint5(shift_imm));
- rn_ = rn;
- rm_ = rm;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
- H = 1 << 5, // halfword (or byte)
- S6 = 1 << 6, // signed (or unsigned)
- L = 1 << 20, // load (or store)
- S = 1 << 20, // set condition code (or leave unchanged)
- W = 1 << 21, // writeback base register (or leave unchanged)
- A = 1 << 21, // accumulate in multiply instruction (or not)
- B = 1 << 22, // unsigned byte (or word)
- N = 1 << 22, // long (or short)
- U = 1 << 23, // positive (or negative) offset/index
- P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
- I = 1 << 25, // immediate shifter operand (or not)
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
-
- // Instruction bit masks.
- RdMask = 15 << 12, // in str instruction
- CondMask = 15 << 28,
- CoprocessorMask = 15 << 8,
- OpCodeMask = 15 << 21, // in data-processing instructions
- Imm24Mask = (1 << 24) - 1,
- Off12Mask = (1 << 12) - 1,
- // Reserved condition.
- nv = 15 << 28
-};
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
- al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-static const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | sp.code() * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-static const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | sp.code() * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
-// ldr pc, [pc, #XXX]
-const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-
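// Illustrative standalone sketch (not part of the deleted file or the build): the
// kPopInstruction pattern above can be recomputed by hand -- condition AL, the ADD
// opcode (4) in bits 24-21, the I bit for an immediate shifter operand, sp as both
// Rn and Rd, and an immediate of 4 (LeaveCC contributes 0). The result is the
// familiar encoding 0xE28DD004 for "add sp, sp, #4".
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t al  = 14u << 28;  // condition: always
  const uint32_t I   = 1u << 25;   // immediate shifter operand
  const uint32_t B21 = 1u << 21;   // data-processing opcode field starts at bit 21
  const uint32_t B16 = 1u << 16;   // Rn field
  const uint32_t B12 = 1u << 12;   // Rd field
  const uint32_t sp_code = 13;     // r13 is sp
  uint32_t pop = al | 4 * B21 | 4 | I | sp_code * B16 | sp_code * B12;
  std::printf("kPopInstruction = 0x%08X\n", static_cast<unsigned>(pop));  // 0xE28DD004
  return 0;
}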
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Setup buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_prinfo_ = 0;
- next_buffer_check_ = 0;
- no_const_pool_before_ = 0;
- last_const_pool_end_ = 0;
- last_bound_pos_ = 0;
- current_statement_position_ = RelocInfo::kNoPosition;
- current_position_ = RelocInfo::kNoPosition;
- written_statement_position_ = current_statement_position_;
- written_position_ = current_position_;
-}
-
-
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_prinfo_ == 0);
-
- // Setup code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
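// Illustrative standalone sketch (not part of the deleted file): the link chain
// described above, with a plain int array standing in for the instruction stream.
// Each slot that refers to an unbound label stores the position of the previous
// reference; kEndOfChain terminates the list, and binding patches every slot.
// The names Stream, Refer and Bind are invented for the illustration.
#include <cstdio>
#include <vector>

namespace sketch {

const int kEndOfChain = -4;

struct Stream {
  std::vector<int> slots;        // stand-in for emitted instructions
  int label_link = kEndOfChain;  // position of the most recent reference

  void Refer() {                 // emit a forward reference to the label
    slots.push_back(label_link); // the slot remembers the previous reference
    label_link = static_cast<int>(slots.size()) - 1;
  }
  void Bind(int target) {        // resolve all pending references
    int pos = label_link;
    while (pos != kEndOfChain) {
      int next = slots[pos];
      slots[pos] = target;       // patch this reference with the real target
      pos = next;
    }
    label_link = kEndOfChain;
  }
};

}  // namespace sketch

int main() {
  sketch::Stream s;
  s.slots = {0, 0};  // two unrelated instructions
  s.Refer();         // slot 2 refers to the label
  s.slots.push_back(0);
  s.Refer();         // slot 4 refers to the label
  s.Bind(7);         // label finally bound at position 7
  std::printf("slot2=%d slot4=%d\n", s.slots[2], s.slots[4]);  // both print 7
  return 0;
}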
-
-int Assembler::target_at(int pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
- }
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & Imm24Mask) << 8) >> 6;
- if ((instr & CondMask) == nv && (instr & B24) != 0)
- // blx uses bit 24 to encode bit 2 of imm26
- imm26 += 2;
-
- return pos + kPcLoadDelta + imm26;
-}
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
- int imm26 = target_pos - (pos + kPcLoadDelta);
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if ((instr & CondMask) == nv) {
- // blx uses bit 24 to encode bit 2 of imm26
- ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
- } else {
- ASSERT((imm26 & 3) == 0);
- instr &= ~Imm24Mask;
- }
- int imm24 = imm26 >> 2;
- ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~Imm24Mask) == 0) {
- PrintF("value\n");
- } else {
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- int cond = instr & CondMask;
- const char* b;
- const char* c;
- if (cond == nv) {
- b = "blx";
- c = "";
- } else {
- if ((instr & B24) != 0)
- b = "bl";
- else
- b = "b";
-
- switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
- default:
- c = "";
- UNREACHABLE();
- }
- }
- PrintF("%s%s\n", b, c);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- int fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- if (link > 0) {
- L->link_to(link);
- } else {
- ASSERT(link == kEndOfChain);
- L->Unuse();
- }
-}
-
-
-// Low-level code emission routines depending on the addressing mode.
-static bool fits_shifter(uint32_t imm32,
- uint32_t* rotate_imm,
- uint32_t* immed_8,
- Instr* instr) {
- // imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
- if ((imm8 <= 0xff)) {
- *rotate_imm = rot;
- *immed_8 = imm8;
- return true;
- }
- }
- // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
- if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= 0x2*B21;
- return true;
- }
- }
- return false;
-}
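// Illustrative standalone sketch (not part of the deleted file): an ARM operand-2
// immediate is an 8-bit value rotated right by twice a 4-bit rotate field, which is
// what fits_shifter() above searches for (minus the mov/mvn opcode-flip case).
// Worked case: 0xFF000000 encodes as imm8 = 0xFF with rotate field 4 (rotate right 8).
#include <cstdint>
#include <cstdio>

static bool EncodesAsImmediate(uint32_t imm32, uint32_t* rot, uint32_t* imm8) {
  for (uint32_t r = 0; r < 16; r++) {
    // Rotating left by 2*r undoes a rotate-right by 2*r (the r == 0 case is guarded
    // to avoid a 32-bit shift).
    uint32_t candidate =
        (imm32 << 2 * r) | (r == 0 ? 0 : imm32 >> (32 - 2 * r));
    if (candidate <= 0xff) {
      *rot = r;
      *imm8 = candidate;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  if (EncodesAsImmediate(0xFF000000u, &rot, &imm8)) {
    std::printf("imm8=0x%02X rotate=%u\n",
                static_cast<unsigned>(imm8), static_cast<unsigned>(rot));
  }
  return 0;
}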
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- return Serializer::enabled();
- } else if (rmode == RelocInfo::NONE) {
- return false;
- }
- return true;
-}
-
-
-void Assembler::addrmod1(Instr instr,
- Register rn,
- Register rd,
- const Operand& x) {
- CheckBuffer();
- ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
- if (!x.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (MustUseIp(x.rmode_) ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'.
- RecordRelocInfo(x.rmode_, x.imm32_);
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = static_cast<Condition>(instr & CondMask);
- if ((instr & ~CondMask) == 13*B21) { // mov, S not set
- ldr(rd, MemOperand(pc, 0), cond);
- } else {
- ldr(ip, MemOperand(pc, 0), cond);
- addrmod1(instr, rn, rd, Operand(ip));
- }
- return;
- }
- instr |= I | rotate_imm*B8 | immed_8;
- } else if (!x.rs_.is_valid()) {
- // Immediate shift.
- instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- } else {
- // Register shift.
- ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
- instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
- }
- emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc))
- // Block constant pool emission for one instruction after reading pc.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
-}
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | B | L)) == B26);
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_12 = x.offset_;
- if (offset_12 < 0) {
- offset_12 = -offset_12;
- am ^= U;
- }
- if (!is_uint12(offset_12)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
- addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_12 >= 0); // no masking needed
- instr |= offset_12;
- } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset; the constructors make sure that both shift_imm_
-    // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
- ASSERT(x.rn_.is_valid());
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_8 = x.offset_;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- if (!is_uint8(offset_8)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_8 >= 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
- } else if (x.shift_imm_ != 0) {
- // Scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- } else {
- // Register offset.
- ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
- instr |= x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
- ASSERT(rl != 0);
- ASSERT(!rn.is(pc));
- emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // Unindexed addressing is not encoded by this function.
- ASSERT_EQ((B27 | B26),
- (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
- ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
- int am = x.am_;
- int offset_8 = x.offset_;
- ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
- offset_8 >>= 2;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
-
-  // Post-indexed addressing requires W == 1; this differs from addrmod2/3.
- if ((am & P) == 0)
- am |= W;
-
- ASSERT(offset_8 >= 0); // no masking needed
- emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(pc_offset());
- }
-
- // Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- return target_pos - (pc_offset() + kPcLoadDelta);
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
-// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & Imm24Mask));
-
- if (cond == al)
- // Dead code is a good location to emit the constant pool.
- CheckConstPool(false, false);
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) { // v5 and above
- WriteRecordedPositions();
- ASSERT((branch_offset & 1) == 0);
- int h = ((branch_offset & 2) >> 1)*B24;
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(Register target, Condition cond) { // v5 and above
- WriteRecordedPositions();
- ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
- WriteRecordedPositions();
- ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
-}
-
-
-// Data-processing instructions.
-
-// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
-// Instruction details available in ARM DDI 0406A, A8-464.
-// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
-// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
-void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
- const Operand& src3, Condition cond) {
- ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
- ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
- ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
- emit(cond | 0x3F*B21 | src3.imm32_*B16 |
- dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
-}
-
-
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 0*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 1*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 2*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 3*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 4*B21 | s, src1, dst, src2);
-
- // Eliminate pattern: push(r), pop()
- // str(src, MemOperand(sp, 4, NegPreIndex), al);
- // add(sp, sp, Operand(kPointerSize));
- // Both instructions can be eliminated.
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 5*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 6*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 7*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 8*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 9*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 10*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 11*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 12*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
- if (dst.is(pc)) {
- WriteRecordedPositions();
- }
- addrmod1(cond | 13*B21 | s, r0, dst, src);
-}
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 14*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | 15*B21 | s, r0, dst, src);
-}
-
-
-// Multiply instructions.
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- // dst goes in bits 16-19 for this instruction!
- emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions.
-void Assembler::clz(Register dst, Register src, Condition cond) {
- // v5 and above.
- ASSERT(!dst.is(pc) && !src.is(pc));
- emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | B4 | src.code());
-}
-
-
-// Status register access instructions.
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- ASSERT(!dst.is(pc));
- emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
- Condition cond) {
- ASSERT(fields >= B16 && fields < B20); // at least one field set
- Instr instr;
- if (!src.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (MustUseIp(src.rmode_) ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- msr(fields, Operand(ip), cond);
- return;
- }
- instr = I | rotate_imm*B8 | immed_8;
- } else {
- ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
- instr = src.rm_.code();
- }
- emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions.
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- if (dst.is(pc)) {
- WriteRecordedPositions();
- }
- addrmod2(cond | B26 | L, dst, src);
-
- // Eliminate pattern: push(r), pop(r)
- // str(r, MemOperand(sp, 4, NegPreIndex), al)
- // ldr(r, MemOperand(sp, 4, PostIndex), al)
- // Both instructions can be eliminated.
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26, src, dst);
-
- // Eliminate pattern: pop(), push(r)
- // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
- // -> str r, [sp, 0], al
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
- pc_ -= 2 * kInstrSize;
- emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
- addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
- Register base,
- RegList dst,
- Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable.
- ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
- addrmod4(cond | B27 | am | L, base, dst);
-
- // Emit the constant pool after a function return implemented by ldm ..{..pc}.
- if (cond == al && (dst & pc.bit()) != 0) {
- // There is a slight chance that the ldm instruction was actually a call,
- // in which case it would be wrong to return into the constant pool; we
- // recognize this case by checking if the emission of the pool was blocked
- // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
- // the case, we emit a jump over the pool.
- CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
- }
-}
-
-
-void Assembler::stm(BlockAddrMode am,
- Register base,
- RegList src,
- Condition cond) {
- addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Semaphore instructions.
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
- ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
- ASSERT(!dst.is(base) && !src.is(base));
- emit(cond | P | base.code()*B16 | dst.code()*B12 |
- B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
- Register src,
- Register base,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
- ASSERT(!dst.is(base) && !src.is(base));
- emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
- B7 | B4 | src.code());
-}
-
-
-// Exception-generating instructions and debugging support.
-void Assembler::stop(const char* msg) {
-#if !defined(__arm__)
- // The simulator handles these special instructions and stops execution.
- emit(15 << 28 | ((intptr_t) msg));
-#else
- // Just issue a simple break instruction for now. Alternatively we could use
- // the swi(0x9f0001) instruction on Linux.
- bkpt(0);
-#endif
-}
-
-
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
- ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
-}
-
-
-void Assembler::swi(uint32_t imm24, Condition cond) {
- ASSERT(is_uint24(imm24));
- emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
- crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
- const MemOperand& dst,
- LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-// Support for VFP.
-void Assembler::vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(offset % 4 == 0);
- emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(offset % 4 == 0);
- emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
- // Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src1.is(pc) && !src2.is(pc));
- emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
-}
-
-
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
- // <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst1.is(pc) && !dst2.is(pc));
- emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond) {
- // Sn = Rt.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src.is(pc));
- emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
- src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Rt = Sn.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst.is(pc));
- emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
- dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
-}
-
-
-void Assembler::vcvt(const DwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond) {
-  // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
- // Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
- dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
- (0x1 & src.code())*B5 | (src.code() >> 1));
-}
-
-
-void Assembler::vcvt(const SwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
-  // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
- // Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
- // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 |
- 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
- 0x5*B9 | B8 | B7 | B6 | src.code());
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vadd(Dn, Dm) double precision floating point addition.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vsub(Dn, Dm) double precision floating point subtraction.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vmul(Dn, Dm) double precision floating point multiplication.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vdiv(Dn, Dm) double precision floating point division.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const SBit s,
- const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xF*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-// Pseudo instructions.
-void Assembler::lea(Register dst,
- const MemOperand& x,
- SBit s,
- Condition cond) {
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- if ((am & P) == 0) // post indexing
- mov(dst, Operand(x.rn_), s, cond);
- else if ((am & U) == 0) // negative indexing
- sub(dst, x.rn_, Operand(x.offset_), s, cond);
- else
- add(dst, x.rn_, Operand(x.offset_), s, cond);
- } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset; the constructors make sure that both shift_imm_
-    // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- if ((am & P) == 0) // post indexing
- mov(dst, Operand(x.rn_), s, cond);
- else if ((am & U) == 0) // negative indexing
- sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
- else
- add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
- }
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
- uint32_t dummy1;
- uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
- BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_debug_code) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::RecordPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
-}
-
-
-void Assembler::WriteRecordedPositions() {
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- }
-}
-
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
- CHECK_GT(desc.buffer_size, 0); // no overflow
-
- // Setup new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // None of our relocation types are pc relative pointing outside the code
- // buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries.
-
- // Relocate pending relocation entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
- // These modes do not need an entry in the constant pool.
- } else {
- ASSERT(num_prinfo_ < kMaxNumPRInfo);
- prinfo_[num_prinfo_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- }
- if (rinfo.rmode() != RelocInfo::NONE) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
- }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Calculate the offset of the next check. It will be overwritten
- // when a const pool is generated or when const pools are being
- // blocked for a specific range.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- // There is nothing to do if there are no pending relocation info entries.
- if (num_prinfo_ == 0) return;
-
- // We emit a constant pool at regular intervals of about kDistBetweenPools
- // or when requested by parameter force_emit (e.g. after each function).
- // We prefer not to emit a jump unless the max distance is reached or if we
- // are running low on slots, which can happen if a lot of constants are being
- // emitted (e.g. --debug-code and many static references).
- int dist = pc_offset() - last_const_pool_end_;
- if (!force_emit && dist < kMaxDistBetweenPools &&
- (require_jump || dist < kDistBetweenPools) &&
- // TODO(1236125): Cleanup the "magic" number below. We know that
- // the code generation will test every kCheckConstIntervalInst.
- // Thus we are safe as long as we generate less than 7 constant
- // entries per instruction.
- (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
- return;
- }
-
- // If we did not return by now, we need to emit the constant pool soon.
-
- // However, some small sequences of instructions must not be broken up by the
- // insertion of a constant pool; such sequences are protected by setting
- // no_const_pool_before_, which is checked here. Also, recursive calls to
- // CheckConstPool are blocked by no_const_pool_before_.
- if (pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- next_buffer_check_ = no_const_pool_before_;
-
- // Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
- return;
- }
-
- int jump_instr = require_jump ? kInstrSize : 0;
-
- // Check that the code buffer is large enough before emitting the constant
- // pool and relocation information (include the jump over the pool and the
- // constant pool marker).
- int max_needed_space =
- jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
- while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
- // Block recursive calls to CheckConstPool.
- BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
- num_prinfo_*kInstrSize);
- // Don't bother to check for the emit calls below.
- next_buffer_check_ = no_const_pool_before_;
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) b(&after_pool);
-
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker "Undefined instruction" as specified by
- // A3.1 Instruction set encoding.
- emit(0x03000000 | num_prinfo_);
-
- // Emit constant pool entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
- Instr instr = instr_at(rinfo.pc());
-
- // Instruction to patch must be a ldr/str [pc, #offset].
- // P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
- (2*B25 | P | U | pc.code()*B16));
- int delta = pc_ - rinfo.pc() - 8;
- ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
- if (delta < 0) {
- instr &= ~U;
- delta = -delta;
- }
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), instr + delta);
- emit(rinfo.data());
- }
- num_prinfo_ = 0;
- last_const_pool_end_ = pc_offset();
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
-
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-}
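// Illustrative standalone sketch (not part of the deleted file): the patch loop above
// turns each "ldr rX, [pc, #0]" placeholder into a load from its pool entry. Because
// an ARM pc-relative load sees pc as the instruction address plus 8, the offset is
// delta = pool_entry_offset - ldr_offset - 8. The byte offsets below are made up.
#include <cstdio>

int main() {
  int ldr_offset = 12;  // where the placeholder ldr was emitted
  int pool_entry = 32;  // where its constant lands in the pool
  int delta = pool_entry - ldr_offset - 8;
  std::printf("patched: ldr rX, [pc, #%d]\n", delta);  // prints #12
  return 0;
}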
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/arm/assembler-thumb2.h b/deps/v8/src/arm/assembler-thumb2.h
deleted file mode 100644
index 2da11389ac..0000000000
--- a/deps/v8/src/arm/assembler-thumb2.h
+++ /dev/null
@@ -1,1036 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
-#define V8_ARM_ASSEMBLER_THUMB2_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-// Core register
-struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-
-extern Register no_reg;
-extern Register r0;
-extern Register r1;
-extern Register r2;
-extern Register r3;
-extern Register r4;
-extern Register r5;
-extern Register r6;
-extern Register r7;
-extern Register r8;
-extern Register r9;
-extern Register r10;
-extern Register fp;
-extern Register ip;
-extern Register sp;
-extern Register lr;
-extern Register pc;
-
-
-// Single word VFP register.
-struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- int code_;
-};
-
-
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
-
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
-
-
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-
-extern CRegister no_creg;
-extern CRegister cr0;
-extern CRegister cr1;
-extern CRegister cr2;
-extern CRegister cr3;
-extern CRegister cr4;
-extern CRegister cr5;
-extern CRegister cr6;
-extern CRegister cr7;
-extern CRegister cr8;
-extern CRegister cr9;
-extern CRegister cr10;
-extern CRegister cr11;
-extern CRegister cr12;
-extern CRegister cr13;
-extern CRegister cr14;
-extern CRegister cr15;
-
-
-// Coprocessor number
-enum Coprocessor {
- p0 = 0,
- p1 = 1,
- p2 = 2,
- p3 = 3,
- p4 = 4,
- p5 = 5,
- p6 = 6,
- p7 = 7,
- p8 = 8,
- p9 = 9,
- p10 = 10,
- p11 = 11,
- p12 = 12,
- p13 = 13,
- p14 = 14,
- p15 = 15
-};
-
-
-// Condition field in instructions.
-enum Condition {
- eq = 0 << 28, // Z set equal.
- ne = 1 << 28, // Z clear not equal.
- nz = 1 << 28, // Z clear not zero.
- cs = 2 << 28, // C set carry set.
- hs = 2 << 28, // C set unsigned higher or same.
- cc = 3 << 28, // C clear carry clear.
- lo = 3 << 28, // C clear unsigned lower.
- mi = 4 << 28, // N set negative.
- pl = 5 << 28, // N clear positive or zero.
- vs = 6 << 28, // V set overflow.
- vc = 7 << 28, // V clear no overflow.
- hi = 8 << 28, // C set, Z clear unsigned higher.
- ls = 9 << 28, // C clear or Z set unsigned lower or same.
- ge = 10 << 28, // N == V greater or equal.
- lt = 11 << 28, // N != V less than.
- gt = 12 << 28, // Z clear, N == V greater than.
- le = 13 << 28, // Z set or N != V less than or equal.
- al = 14 << 28 // always.
-};
-
-
-// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cc;
- };
-}
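An editorial illustration (not part of the original header) of the difference between the two helpers: ReverseCondition is what you use after transposing the operands of a comparison, while NegateCondition inverts the outcome of the same comparison. Using the __ shorthand for ACCESS_MASM seen in the codegen files:

    Label target;
    __ cmp(r0, r1);
    __ b(lt, &target);                     // taken if r0 < r1
    __ cmp(r1, r0);                        // same test with operands swapped...
    __ b(ReverseCondition(lt), &target);   // ...so use gt, still taken if r0 < r1
    // NegateCondition(lt) is ge instead: taken exactly when r0 < r1 does NOT hold.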
-
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the ARM. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
- LSL = 0 << 5,
- LSR = 1 << 5,
- ASR = 2 << 5,
- ROR = 3 << 5,
- RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
- SetCC = 1 << 20, // set condition code
- LeaveCC = 0 << 20 // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
- // bit encoding P U W
- Offset = (8|4|0) << 21, // offset (without writeback to base)
- PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
- PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
- NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
- NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
- NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
-};
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
- // bit encoding P U W
- da = (0|0|0) << 21, // decrement after
- ia = (0|4|0) << 21, // increment after
- db = (8|0|0) << 21, // decrement before
- ib = (8|4|0) << 21, // increment before
- da_w = (0|0|1) << 21, // decrement after with writeback to base
- ia_w = (0|4|1) << 21, // increment after with writeback to base
- db_w = (8|0|1) << 21, // decrement before with writeback to base
- ib_w = (8|4|1) << 21 // increment before with writeback to base
-};
-
-
-// Coprocessor load/store operand size
-enum LFlag {
- Long = 1 << 22, // long load/store coprocessor
- Short = 0 << 22 // short load/store coprocessor
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
- // immediate
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
- INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // rm
- INLINE(explicit Operand(Register rm));
-
- // rm <shift_op> shift_imm
- explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
- // rm <shift_op> rs
- explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
- Register rm() const { return rm_; }
-
- private:
- Register rm_;
- Register rs_;
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- int32_t imm32_; // valid if rm_ == no_reg
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
-};
-
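For orientation, a sketch (editorial addition) of the shifter-operand forms these constructors provide; they become the second operand of the data-processing instructions declared further down:

    Operand(123)                     // 32-bit immediate
    Operand(Smi::FromInt(1))         // immediate taken from a tagged smi
    Operand(r2)                      // plain register rm
    Operand(r2, LSL, kSmiTagSize)    // rm shifted left by an immediate
    Operand(r2, LSR, r3)             // rm shifted right by the amount in rs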
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
- // [rn +/- offset] Offset/NegOffset
- // [rn +/- offset]! PreIndex/NegPreIndex
- // [rn], +/- offset PostIndex/NegPostIndex
- // offset is any signed 32-bit value; offset is first loaded to register ip if
- // it does not fit the addressing mode (12-bit unsigned and sign bit)
- explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
- // [rn +/- rm] Offset/NegOffset
- // [rn +/- rm]! PreIndex/NegPreIndex
- // [rn], +/- rm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
- // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
- // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
- // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
-
- private:
- Register rn_; // base
- Register rm_; // register offset
- int32_t offset_; // valid if rm_ == no_reg
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- AddrMode am_; // bits P, U, and W
-
- friend class Assembler;
-};
-
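A short editorial sketch of how the constructors above map onto the AddrMode encodings; the push and pop pseudo instructions declared later in this header are built from the second and third forms:

    MemOperand(fp, -8)               // [fp, #-8]         plain offset, no writeback
    MemOperand(sp, 4, NegPreIndex)   // [sp, #-4]!        pre-index, writes sp back
    MemOperand(sp, 4, PostIndex)     // [sp], #4          post-index, writes sp back
    MemOperand(r1, r2, LSL, 2)       // [r1, r2, lsl #2]  scaled register offset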
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- return (enabled_ & (1u << f)) != 0;
- }
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- explicit Scope(CpuFeature f) {
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= 1u << f;
- }
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
- private:
- unsigned old_enabled_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- private:
- static unsigned supported_;
- static unsigned enabled_;
- static unsigned found_by_runtime_probing_;
-};
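The intended usage pattern, as an editorial sketch (assuming a MacroAssembler and the usual __ shorthand): probe once at startup, test IsSupported, and wrap optional instructions in a Scope so that, in debug builds, an unguarded use trips the ASSERT in the Scope constructor.

    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ vadd(d0, d0, d1);   // VFP3-only instruction, legal inside the scope
    } else {
      // Fall back to a sequence built from core ARM instructions only.
    }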
-
-
-typedef int32_t Instr;
-
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCPattern;
-
-
-class Assembler : public Malloced {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
- ~Assembler();
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
- // Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
-
- // Puts a label's target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- INLINE(static Address target_address_address_at(Address pc));
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address constant_pool_entry, Address target);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target) {
- set_target_at(constant_pool_entry, target);
- }
-
- // Here we are patching the address in the constant pool, not the actual call
- // instruction. The address in the constant pool is the same size as a
- // pointer.
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Distance between the instruction referring to the address of the call
- // target (ldr pc, [target addr in const pool]) and the return address
- static const int kCallTargetAddressOffset = kInstrSize;
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-
- // Difference between address of current opcode and value read from pc
- // register.
- static const int kPcLoadDelta = 8;
-
- static const int kJSReturnSequenceLength = 4;
-
- // ---------------------------------------------------------------------------
- // Code generation
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
-
- // Branch instructions
- void b(int branch_offset, Condition cond = al);
- void bl(int branch_offset, Condition cond = al);
- void blx(int branch_offset); // v5 and above
- void blx(Register target, Condition cond = al); // v5 and above
- void bx(Register target, Condition cond = al); // v5 and above, plus v4t
-
- // Convenience branch instructions using labels
- void b(Label* L, Condition cond = al) {
- b(branch_offset(L, cond == al), cond);
- }
- void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
- void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
- void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
- void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
-
- // Data-processing instructions
- void ubfx(Register dst, Register src1, const Operand& src2,
- const Operand& src3, Condition cond = al);
-
- void and_(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void eor(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sub(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void sub(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- sub(dst, src1, Operand(src2), s, cond);
- }
-
- void rsb(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void add(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void adc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sbc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void rsc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void tst(Register src1, const Operand& src2, Condition cond = al);
- void tst(Register src1, Register src2, Condition cond = al) {
- tst(src1, Operand(src2), cond);
- }
-
- void teq(Register src1, const Operand& src2, Condition cond = al);
-
- void cmp(Register src1, const Operand& src2, Condition cond = al);
- void cmp(Register src1, Register src2, Condition cond = al) {
- cmp(src1, Operand(src2), cond);
- }
-
- void cmn(Register src1, const Operand& src2, Condition cond = al);
-
- void orr(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void orr(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- orr(dst, src1, Operand(src2), s, cond);
- }
-
- void mov(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
- void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
- mov(dst, Operand(src), s, cond);
- }
-
- void bic(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void mvn(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
-
- // Multiply instructions
-
- void mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s = LeaveCC, Condition cond = al);
-
- void mul(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- // Miscellaneous arithmetic instructions
-
- void clz(Register dst, Register src, Condition cond = al); // v5 and above
-
- // Status register access instructions
-
- void mrs(Register dst, SRegister s, Condition cond = al);
- void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
- // Load/Store instructions
- void ldr(Register dst, const MemOperand& src, Condition cond = al);
- void str(Register src, const MemOperand& dst, Condition cond = al);
- void ldrb(Register dst, const MemOperand& src, Condition cond = al);
- void strb(Register src, const MemOperand& dst, Condition cond = al);
- void ldrh(Register dst, const MemOperand& src, Condition cond = al);
- void strh(Register src, const MemOperand& dst, Condition cond = al);
- void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
- void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-
- // Load/Store multiple instructions
- void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
- void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
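An editorial example of how these fit together with the BlockAddrMode enum and Register::bit(): a typical save/restore sequence ORs the bit() encodings into a RegList and uses the pre-decrement/post-increment modes.

    __ stm(db_w, sp, r4.bit() | r5.bit() | fp.bit() | lr.bit());  // push r4, r5, fp, lr
    // ... body ...
    __ ldm(ia_w, sp, r4.bit() | r5.bit() | fp.bit() | pc.bit());  // pop and return via pc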
-
- // Semaphore instructions
- void swp(Register dst, Register src, Register base, Condition cond = al);
- void swpb(Register dst, Register src, Register base, Condition cond = al);
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg);
-
- void bkpt(uint32_t imm16); // v5 and above
- void swi(uint32_t imm24, Condition cond = al);
-
- // Coprocessor instructions
-
- void cdp(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2, Condition cond = al);
-
- void cdp2(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2); // v5 and above
-
- void mcr(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mcr2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void mrc(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mrc2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short, Condition cond = al);
- void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short); // v5 and above
- void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short, Condition cond = al);
- void stc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short); // v5 and above
- void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- // Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e., D16 to D31.
- // However, some simple modifications can allow
- // these APIs to support D16 to D31.
-
- void vldr(const DwVfpRegister dst,
- const Register base,
- int offset, // Offset must be a multiple of 4.
- const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const Register base,
- int offset, // Offset must be a multiple of 4.
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt(const DwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt(const SwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
-
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void vmrs(const Register dst,
- const Condition cond = al);
-
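A hedged sketch (editorial, not from the original source) of how these signatures compose: the register-pair vmov overloads transfer a raw 64-bit pattern between two core registers and a double register, which is the usual way values enter and leave the VFP unit with this API.

    CpuFeatures::Scope scope(VFP3);
    __ vmov(d0, r0, r1);    // r0:r1 -> d0, raw 64-bit move
    __ vmov(d1, r2, r3);    // r2:r3 -> d1
    __ vadd(d2, d0, d1);    // d2 = d0 + d1
    __ vmov(r0, r1, d2);    // d2 -> r0:r1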
- // Pseudo instructions
- void nop() { mov(r0, Operand(r0)); }
-
- void push(Register src, Condition cond = al) {
- str(src, MemOperand(sp, 4, NegPreIndex), cond);
- }
-
- void pop(Register dst, Condition cond = al) {
- ldr(dst, MemOperand(sp, 4, PostIndex), cond);
- }
-
- void pop() {
- add(sp, sp, Operand(kPointerSize));
- }
-
- // Load effective address of memory operand x into register dst
- void lea(Register dst, const MemOperand& x,
- SBit s = LeaveCC, Condition cond = al);
-
- // Jump unconditionally to given label.
- void jmp(Label* L) { b(L, al); }
-
- // Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
- }
-
- // Check whether an immediate fits an addressing mode 1 instruction.
- bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Debugging
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --debug_code to enable.
- void RecordComment(const char* msg);
-
- void RecordPosition(int pos);
- void RecordStatementPosition(int pos);
- void WriteRecordedPositions();
-
- int pc_offset() const { return pc_ - buffer_; }
- int current_position() const { return current_position_; }
- int current_statement_position() const { return current_statement_position_; }
-
- protected:
- int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Read/patch instructions
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
- // Decode branch instruction at pos and return branch target pos
- int target_at(int pos);
-
- // Patch branch instruction at pos to branch to given branch target pos
- void target_at_put(int pos, int target_pos);
-
- // Check if it is time to emit a constant pool for pending reloc info entries
- void CheckConstPool(bool force_emit, bool require_jump);
-
- // Block the emission of the constant pool before pc_offset
- void BlockConstPoolBefore(int pc_offset) {
- if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
- }
-
- private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
- int next_buffer_check_; // pc offset of next buffer check
-
- // Code generation
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
- byte* pc_; // the program counter; moves forward
-
- // Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
- // Constants in the pool may be addresses of functions that get relocated;
- // if so, a relocation info entry is associated to the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
-
- // Pools are emitted after function return and in dead code at (more or less)
- // regular intervals of kDistBetweenPools bytes
- static const int kDistBetweenPools = 1*KB;
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
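As a quick editorial check of the arithmetic behind these constants: kBufferCheckInterval is 1*KB/2 = 512 bytes, so kMaxDistBetweenPools comes out to 4096 - 2*512 = 3072 bytes, and with kInstrSize = 4 the kMaxNumPRInfo buffer declared below holds 3072 / 4 = 768 pending relocation entries, one per instruction in the worst case.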
-
- // Emission of the constant pool may be blocked in some code sequences
- int no_const_pool_before_; // block emission before this pc offset
-
- // Keep track of the last emitted pool to guarantee a maximal distance
- int last_const_pool_end_; // pc offset following the last constant pool
-
- // Relocation info generation
- // Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
- RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
- int num_prinfo_; // number of pending reloc info entries in the buffer
-
- // The bound position, before this we cannot do instruction elimination.
- int last_bound_pos_;
-
- // source position information
- int current_position_;
- int current_statement_position_;
- int written_position_;
- int written_statement_position_;
-
- // Code emission
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
-
- // Instruction generation
- void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
- void addrmod2(Instr instr, Register rd, const MemOperand& x);
- void addrmod3(Instr instr, Register rd, const MemOperand& x);
- void addrmod4(Instr instr, Register rn, RegList rl);
- void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
- // Labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
- void next(Label* L);
-
- // Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class RegExpMacroAssemblerARM;
- friend class RelocInfo;
- friend class CodePatcher;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_THUMB2_H_
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 5718cb3ce2..1f776562f2 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
@@ -130,7 +132,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// of the JSArray.
// result: JSObject
// scratch2: start of next object
- __ lea(scratch1, MemOperand(result, JSArray::kSize));
+ __ add(scratch1, result, Operand(JSArray::kSize));
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
@@ -1311,3 +1313,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 5509830b30..7b62da9d33 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -27,12 +27,15 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
+#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
@@ -40,10 +43,12 @@
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
+#include "virtual-frame-arm-inl.h"
namespace v8 {
namespace internal {
+
#define __ ACCESS_MASM(masm_)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -274,7 +279,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_.SetExpectedHeight();
function_return_is_shadowed_ = false;
// Generate code to 'execute' declarations and initialize functions
@@ -1143,44 +1148,66 @@ void CodeGenerator::SmiOperation(Token::Value op,
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags
+ uint32_t problematic_mask = kSmiTagMask;
+ // For unsigned shift by zero all negative smis are problematic.
+ if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
+ __ tst(tos, Operand(problematic_mask));
+ deferred->Branch(ne); // Go slow for problematic input.
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
- __ mov(scratch, Operand(scratch, LSL, shift_value));
+ int adjusted_shift = shift_value - kSmiTagSize;
+ ASSERT(adjusted_shift >= 0);
+ if (adjusted_shift != 0) {
+ __ mov(scratch, Operand(tos, LSL, adjusted_shift));
+ // Check that the *signed* result fits in a smi.
+ __ add(scratch2, scratch, Operand(0x40000000), SetCC);
+ deferred->Branch(mi);
+ __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
+ } else {
+ // Check that the *signed* result fits in a smi.
+ __ add(scratch2, tos, Operand(0x40000000), SetCC);
+ deferred->Branch(mi);
+ __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+ }
}
- // check that the *signed* result fits in a smi
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- deferred->Branch(mi);
break;
}
case Token::SHR: {
- // LSR by immediate 0 means shifting 32 bits.
if (shift_value != 0) {
+ __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag.
+ // LSR by immediate 0 means shifting 32 bits.
__ mov(scratch, Operand(scratch, LSR, shift_value));
+ if (shift_value == 1) {
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+ // smi tagging. These two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ __ tst(scratch, Operand(0xc0000000));
+ deferred->Branch(ne);
+ }
+ __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
}
- // check that the *unsigned* result fits in a smi
- // neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when
- // smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
- __ tst(scratch, Operand(0xc0000000));
- deferred->Branch(ne);
break;
}
case Token::SAR: {
+ // In the ARM instruction set, ASR by immediate 0 means shifting 32
+ // bits.
if (shift_value != 0) {
- // ASR by immediate 0 means shifting 32 bits.
- __ mov(scratch, Operand(scratch, ASR, shift_value));
+ // Do the shift and the tag removal in one operation. If the shift
+ // is 31 bits (the highest possible value) then we emit the
+ // instruction as a shift by 0 which means shift arithmetically by
+ // 32.
+ __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
+ // Put tag back.
+ __ mov(tos, Operand(tos, LSL, kSmiTagSize));
}
break;
}
default: UNREACHABLE();
}
- __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
deferred->BindExit();
frame_->EmitPush(tos);
break;
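To make the rewritten SHL path concrete, a small editorial worked example (smis store their value shifted left by kSmiTagSize, which is 1 here): for tos = Smi(3), i.e. the word 6, and shift_value = 4, adjusted_shift is 3, so scratch becomes 6 << 3 = 48 = (3 << 4); adding 0x40000000 leaves the sign bit clear, so the mi branch is not taken, and the final LSL by kSmiTagSize re-tags the result as the word 96 = Smi(48).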
@@ -1343,6 +1370,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// give us a megamorphic load site. Not super, but it works.
LoadAndSpill(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame_->Dup();
frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
@@ -1549,7 +1577,7 @@ void CodeGenerator::VisitBlock(Block* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->SetExpectedHeight();
VisitStatementsAndSpill(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -1836,7 +1864,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->SetExpectedHeight();
LoadAndSpill(node->tag());
@@ -1925,7 +1953,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->SetExpectedHeight();
JumpTarget body(JumpTarget::BIDIRECTIONAL);
IncrementLoopNesting();
@@ -1935,14 +1963,14 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
ConditionAnalysis info = AnalyzeCondition(node->cond());
switch (info) {
case ALWAYS_TRUE:
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->SetExpectedHeight();
node->continue_target()->Bind();
break;
case ALWAYS_FALSE:
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->SetExpectedHeight();
break;
case DONT_KNOW:
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->SetExpectedHeight();
body.Bind();
break;
}
@@ -2006,12 +2034,12 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
ConditionAnalysis info = AnalyzeCondition(node->cond());
if (info == ALWAYS_FALSE) return;
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
// Label the top of the loop with the continue target for the backward
// CFG edge.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->SetExpectedHeight();
node->continue_target()->Bind();
if (info == DONT_KNOW) {
@@ -2060,17 +2088,17 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
ConditionAnalysis info = AnalyzeCondition(node->cond());
if (info == ALWAYS_FALSE) return;
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
JumpTarget loop(JumpTarget::BIDIRECTIONAL);
if (node->next() == NULL) {
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->SetExpectedHeight();
node->continue_target()->Bind();
} else {
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->SetExpectedHeight();
loop.Bind();
}
@@ -2275,8 +2303,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// sp[4] : enumerable
// Grab the current frame's height for the break and continue
// targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->SetExpectedHeight();
+ node->continue_target()->SetExpectedHeight();
// Load the current count to r0, load the length to r1.
__ ldrd(r0, frame_->ElementAt(0));
@@ -2766,45 +2794,13 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
JumpTarget slow;
JumpTarget done;
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
- // If there was no control flow to slow, we can exit early.
- if (!slow.is_linked()) {
- frame_->EmitPush(r0);
- return;
- }
- frame_->SpillAll();
-
- done.Jump();
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- frame_->SpillAll();
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
- // Only generate the fast case for locals that rewrite to slots.
- // This rules out argument loads because eval forces arguments
- // access to be through the arguments object.
- if (potential_slot != NULL) {
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(potential_slot,
- r1,
- r2,
- &slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- // There is always control flow to slow from
- // ContextSlotOperandCheckExtensions so we have to jump around
- // it.
- done.Jump();
- }
- }
+ // Generate fast case for loading from slots that correspond to
+ // local/global variables or arguments unless they are shadowed by
+ // eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(slot,
+ typeof_state,
+ &slow,
+ &done);
slow.Bind();
VirtualFrame::SpilledScope spilled_scope(frame_);
@@ -3014,8 +3010,67 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
typeof_state == INSIDE_TYPEOF
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT);
- // Drop the global object. The result is in r0.
- frame_->Drop();
+}
+
+
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ frame_->SpillAll();
+ done->Jump();
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ frame_->SpillAll();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ __ ldr(r0,
+ ContextSlotOperandCheckExtensions(potential_slot,
+ r1,
+ r2,
+ slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ }
+ done->Jump();
+ } else if (rewrite != NULL) {
+ // Generate fast case for argument loads.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ __ ldr(r0,
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ r1,
+ r2,
+ slow));
+ frame_->EmitPush(r0);
+ __ mov(r1, Operand(key_literal->handle()));
+ frame_->EmitPush(r1);
+ EmitKeyedLoad();
+ done->Jump();
+ }
+ }
+ }
+ }
}
@@ -3368,7 +3423,6 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame_->Dup();
}
EmitNamedLoad(name, var != NULL);
- frame_->Drop(); // Receiver is left on the stack.
frame_->EmitPush(r0);
// Perform the binary operation.
@@ -3507,9 +3561,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// Perform the assignment. It is safe to ignore constants here.
ASSERT(node->op() != Token::INIT_CONST);
CodeForSourcePosition(node->position());
- frame_->PopToR0();
EmitKeyedStore(prop->key()->type());
- frame_->Drop(2); // Key and receiver are left on the stack.
frame_->EmitPush(r0);
// Stack layout:
@@ -3705,52 +3757,26 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// JavaScript examples:
//
- // with (obj) foo(1, 2, 3) // foo is in obj
+ // with (obj) foo(1, 2, 3) // foo may be in obj.
//
// function f() {};
// function g() {
// eval(...);
- // f(); // f could be in extension object
+ // f(); // f could be in extension object.
// }
// ----------------------------------
// JumpTargets do not yet support merging frames so the frame must be
// spilled when jumping to these targets.
- JumpTarget slow;
- JumpTarget done;
+ JumpTarget slow, done;
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(var->slot(), NOT_INSIDE_TYPEOF, &slow);
- frame_->EmitPush(r0);
- LoadGlobalReceiver(r1);
- done.Jump();
-
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = var->local_if_not_shadowed()->slot();
- // Only generate the fast case for locals that rewrite to slots.
- // This rules out argument loads because eval forces arguments
- // access to be through the arguments object.
- if (potential_slot != NULL) {
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(potential_slot,
- r1,
- r2,
- &slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- frame_->EmitPush(r0);
- LoadGlobalReceiver(r1);
- done.Jump();
- }
- }
+ // Generate fast case for loading functions from slots that
+ // correspond to local/global variables or arguments unless they
+ // are shadowed by eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &slow,
+ &done);
slow.Bind();
// Load the function
@@ -3764,7 +3790,18 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r0); // function
frame_->EmitPush(r1); // receiver
- done.Bind();
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ JumpTarget call;
+ call.Jump();
+ done.Bind();
+ frame_->EmitPush(r0); // function
+ LoadGlobalReceiver(r1); // receiver
+ call.Bind();
+ }
+
// Call the function. At this point, everything is spilled but the
// function and receiver are in r0 and r1.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@@ -4892,7 +4929,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@@ -4901,10 +4937,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
- // Postfix: Make room for the result.
if (is_postfix) {
- __ mov(r0, Operand(0));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(0)));
}
// A constant reference is not saved to, so a constant reference is not a
@@ -4914,35 +4948,33 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
if (!is_postfix) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(0)));
}
ASSERT_EQ(original_height + 1, frame_->height());
return;
}
+ // This pushes 0, 1 or 2 words onto the stack to be used later when updating
+ // the target. It also pushes the current value of the target.
target.GetValue();
- frame_->EmitPop(r0);
JumpTarget slow;
JumpTarget exit;
- // Load the value (1) into register r1.
- __ mov(r1, Operand(Smi::FromInt(1)));
-
// Check for smi operand.
- __ tst(r0, Operand(kSmiTagMask));
+ Register value = frame_->PopToRegister();
+ __ tst(value, Operand(kSmiTagMask));
slow.Branch(ne);
// Postfix: Store the old value as the result.
if (is_postfix) {
- __ str(r0, frame_->ElementAt(target.size()));
+ frame_->SetElementAt(value, target.size());
}
// Perform optimistic increment/decrement.
if (is_increment) {
- __ add(r0, r0, Operand(r1), SetCC);
+ __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
} else {
- __ sub(r0, r0, Operand(r1), SetCC);
+ __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
}
// If the increment/decrement didn't overflow, we're done.
@@ -4950,41 +4982,50 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Revert optimistic increment/decrement.
if (is_increment) {
- __ sub(r0, r0, Operand(r1));
+ __ sub(value, value, Operand(Smi::FromInt(1)));
} else {
- __ add(r0, r0, Operand(r1));
+ __ add(value, value, Operand(Smi::FromInt(1)));
}
- // Slow case: Convert to number.
+ // Slow case: Convert to number. At this point the
+ // value to be incremented is in the value register.
slow.Bind();
+
+ // Convert the operand to a number.
+ frame_->EmitPush(value);
+
{
- // Convert the operand to a number.
- frame_->EmitPush(r0);
+ VirtualFrame::SpilledScope spilled(frame_);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
- }
- if (is_postfix) {
- // Postfix: store to result (on the stack).
- __ str(r0, frame_->ElementAt(target.size()));
- }
- // Compute the new value.
- __ mov(r1, Operand(Smi::FromInt(1)));
- frame_->EmitPush(r0);
- frame_->EmitPush(r1);
- if (is_increment) {
- frame_->CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- frame_->CallRuntime(Runtime::kNumberSub, 2);
+ if (is_postfix) {
+ // Postfix: store to result (on the stack).
+ __ str(r0, frame_->ElementAt(target.size()));
+ }
+
+ // Compute the new value.
+ frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(1)));
+ if (is_increment) {
+ frame_->CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ frame_->CallRuntime(Runtime::kNumberSub, 2);
+ }
}
+ __ Move(value, r0);
// Store the new value in the target if not const.
+ // At this point the answer is in the value register.
exit.Bind();
- frame_->EmitPush(r0);
+ frame_->EmitPush(value);
+ // Set the target with the result, leaving the result on
+ // top of the stack. Removes the target from the stack if
+ // it has a non-zero size.
if (!is_const) target.SetValue(NOT_CONST_INIT);
}
// Postfix: Discard the new value and use the old.
- if (is_postfix) frame_->EmitPop(r0);
+ if (is_postfix) frame_->Pop();
ASSERT_EQ(original_height + 1, frame_->height());
}
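A brief editorial note on why the optimistic path above works: adding Operand(Smi::FromInt(1)) adds the tagged word 2, taking Smi(n) = 2n directly to Smi(n+1) = 2n + 2 with no untagging, while SetCC lets the overflow flag catch the single case (n = 2^30 - 1) where the result no longer fits in a smi; the add is then reverted and control falls into the slow TO_NUMBER/runtime path.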
@@ -5387,26 +5428,30 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
- explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
+ explicit DeferredReferenceGetNamedValue(Register receiver,
+ Handle<String> name)
+ : receiver_(receiver), name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
private:
+ Register receiver_;
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::Generate() {
+ ASSERT(receiver_.is(r0) || receiver_.is(r1));
+
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
- // Setup the registers and call load IC.
- // On entry to this deferred code, r0 is assumed to already contain the
- // receiver from the top of the stack.
+ // Ensure receiver in r0 and name in r2 to match load ic calling convention.
+ __ Move(r0, receiver_);
__ mov(r2, Operand(name_));
// The rest of the instructions in the deferred code must be together.
@@ -5427,20 +5472,34 @@ void DeferredReferenceGetNamedValue::Generate() {
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
- DeferredReferenceGetKeyedValue() {
+ DeferredReferenceGetKeyedValue(Register key, Register receiver)
+ : key_(key), receiver_(receiver) {
set_comment("[ DeferredReferenceGetKeyedValue");
}
virtual void Generate();
+
+ private:
+ Register key_;
+ Register receiver_;
};
void DeferredReferenceGetKeyedValue::Generate() {
+ ASSERT((key_.is(r0) && receiver_.is(r1)) ||
+ (key_.is(r1) && receiver_.is(r0)));
+
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
+ // Ensure key in r0 and receiver in r1 to match keyed load ic calling
+ // convention.
+ if (key_.is(r1)) {
+ __ Swap(r0, r1, ip);
+ }
+
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has the arguments key and receiver in r0 and r1.
@@ -5460,11 +5519,19 @@ void DeferredReferenceGetKeyedValue::Generate() {
class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
- DeferredReferenceSetKeyedValue() {
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
set_comment("[ DeferredReferenceSetKeyedValue");
}
virtual void Generate();
+
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
};
@@ -5475,10 +5542,17 @@ void DeferredReferenceSetKeyedValue::Generate() {
__ IncrementCounter(
&Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
+ // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
+ // calling convention.
+ if (value_.is(r1)) {
+ __ Swap(r0, r1, ip);
+ }
+ ASSERT(receiver_.is(r2));
+
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed load IC. It has receiver amd key on the stack and the value to
- // store in r0.
+ // Call keyed store IC. It has the arguments value, key and receiver in r0,
+ // r1 and r2.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
@@ -5516,10 +5590,11 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// this code
// Load the receiver from the stack.
- frame_->SpillAllButCopyTOSToR0();
+ Register receiver = frame_->PopToRegister();
+ VirtualFrame::SpilledScope spilled(frame_);
DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(name);
+ new DeferredReferenceGetNamedValue(receiver, name);
#ifdef DEBUG
int kInlinedNamedLoadInstructions = 7;
@@ -5529,19 +5604,19 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Check that the receiver is a heap object.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check the map. The null map used below is patched by the inline cache
// code.
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
- __ ldr(r0, MemOperand(r0, 0));
+ __ ldr(r0, MemOperand(receiver, 0));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
@@ -5576,15 +5651,14 @@ void CodeGenerator::EmitKeyedLoad() {
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
- // Load the key and receiver from the stack to r0 and r1.
- frame_->PopToR1R0();
- Register receiver = r0;
- Register key = r1;
+ // Load the key and receiver from the stack.
+ Register key = frame_->PopToRegister();
+ Register receiver = frame_->PopToRegister(key);
VirtualFrame::SpilledScope spilled(frame_);
- // The deferred code expects key and receiver in r0 and r1.
+ // The deferred code expects key and receiver in registers.
DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue();
+ new DeferredReferenceGetKeyedValue(key, receiver);
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@@ -5594,17 +5668,16 @@ void CodeGenerator::EmitKeyedLoad() {
// property code which can be patched. Therefore the exact number of
// instructions generated need to be fixed, so the constant pool is blocked
// while generating this code.
-#ifdef DEBUG
- int kInlinedKeyedLoadInstructions = 19;
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+#ifdef DEBUG
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
+#endif
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
@@ -5632,17 +5705,15 @@ void CodeGenerator::EmitKeyedLoad() {
__ add(scratch1,
scratch1,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0,
+ __ ldr(scratch1,
MemOperand(scratch1, key, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
- __ cmp(r0, scratch2);
- // This is the only branch to deferred where r0 and r1 do not contain the
- // receiver and key. We can't just load undefined here because we have to
- // check the prototype.
+ __ cmp(scratch1, scratch2);
deferred->Branch(eq);
+ __ mov(r0, scratch1);
// Make sure that the expected number of instructions are generated.
- ASSERT_EQ(kInlinedKeyedLoadInstructions,
+ ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
@@ -5652,78 +5723,86 @@ void CodeGenerator::EmitKeyedLoad() {
void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
- VirtualFrame::SpilledScope scope(frame_);
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
// Inline the keyed store.
Comment cmnt(masm_, "[ Inlined store to keyed property");
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue();
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+ Register scratch3 = r3;
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::keyed_store_inline, 1,
- frame_->scratch0(), frame_->scratch1());
+ scratch1, scratch2);
+
+ // Load the value, key and receiver from the stack.
+ Register value = frame_->PopToRegister();
+ Register key = frame_->PopToRegister(value);
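+    // The receiver goes straight into r2, presumably to match the register
+    // convention (r0: value, r1: key, r2: receiver) expected when the deferred
+    // code hands off to the keyed store IC.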
+ Register receiver = r2;
+ frame_->EmitPop(receiver);
+ VirtualFrame::SpilledScope spilled(frame_);
+
+ // The deferred code expects value, key and receiver in registers.
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(value, key, receiver);
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(value, Operand(kSmiTagMask));
deferred->Branch(ne);
- // Load the key and receiver from the stack.
- __ ldr(r1, MemOperand(sp, 0));
- __ ldr(r2, MemOperand(sp, kPointerSize));
-
// Check that the key is a smi.
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
// Check that the receiver is a heap object.
- __ tst(r2, Operand(kSmiTagMask));
+ __ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check that the receiver is a JSArray.
- __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+ __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
deferred->Branch(ne);
// Check that the key is within bounds. Both the key and the length of
// the JSArray are smis. Use unsigned comparison to handle negative keys.
- __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ cmp(r3, r1);
+ __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ cmp(scratch1, key);
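+    // Both operands are smis, and the unsigned comparison also rejects
+    // negative keys: their sign bit makes them larger than any valid length.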
deferred->Branch(ls); // Unsigned less equal.
// The following instructions are the part of the inlined store keyed
// property code which can be patched. Therefore the exact number of
// instructions generated need to be fixed, so the constant pool is blocked
// while generating this code.
-#ifdef DEBUG
- int kInlinedKeyedStoreInstructions = 7;
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Get the elements array from the receiver and check that it
// is not a dictionary.
- __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
// Read the fixed array map from the constant pool (not from the root
// array) so that the value can be patched. When debugging, we patch this
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
- __ mov(r5, Operand(Factory::fixed_array_map()));
- __ cmp(r4, r5);
+#ifdef DEBUG
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
+#endif
+ __ mov(scratch3, Operand(Factory::fixed_array_map()));
+ __ cmp(scratch2, scratch3);
deferred->Branch(ne);
// Store the value.
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r0, MemOperand(r3, r1, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+ __ add(scratch1, scratch1,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(value,
+ MemOperand(scratch1, key, LSL,
+ kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
// Make sure that the expected number of instructions are generated.
- ASSERT_EQ(kInlinedKeyedStoreInstructions,
+ ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
@@ -5786,19 +5865,20 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
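+      // For references that persist after the load, duplicate the receiver so
+      // a copy remains on the frame afterwards.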
+ if (persist_after_get_) {
+ cgen_->frame()->Dup();
+ }
cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->EmitPush(r0);
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
+ if (!persist_after_get_) set_unloaded();
break;
}
case KEYED: {
+ ASSERT(property != NULL);
if (persist_after_get_) {
cgen_->frame()->Dup2();
}
- ASSERT(property != NULL);
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) set_unloaded();
@@ -5839,16 +5919,13 @@ void Reference::SetValue(InitState init_state) {
}
case KEYED: {
- VirtualFrame::SpilledScope scope(frame);
Comment cmnt(masm, "[ Store to keyed Property");
Property* property = expression_->AsProperty();
ASSERT(property != NULL);
cgen_->CodeForSourcePosition(property->position());
-
- frame->EmitPop(r0); // Value.
cgen_->EmitKeyedStore(property->key()->type());
frame->EmitPush(r0);
- cgen_->UnloadReference(this);
+ set_unloaded();
break;
}
@@ -8486,9 +8563,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_NATIVE_REGEXP
+#else // V8_INTERPRETED_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
@@ -8598,7 +8675,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, kFixedArrayMapRootIndex);
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r0, ip);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
@@ -8821,7 +8898,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_NATIVE_REGEXP
+#endif // V8_INTERPRETED_REGEXP
}
@@ -9967,3 +10044,5 @@ void StringAddStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index bb76b633bd..361ea131de 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -29,6 +29,7 @@
#define V8_ARM_CODEGEN_ARM_H_
#include "ic-inl.h"
+#include "ast.h"
namespace v8 {
namespace internal {
@@ -36,6 +37,7 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
+class JumpTarget;
class RegisterAllocator;
class RegisterFile;
@@ -217,6 +219,10 @@ class CodeGenerator: public AstVisitor {
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+ // Constants related to patching of inlined load/store.
+ static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
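+  // Each constant is the distance, in instructions, from the patched map load
+  // to the end of the inlined sequence; the IC patching code in ic-arm.cc uses
+  // it to locate the instruction to rewrite.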
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -309,6 +315,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
+
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
@@ -338,6 +345,15 @@ class CodeGenerator: public AstVisitor {
TypeofState typeof_state,
JumpTarget* slow);
+ // Support for loading from local/global variables and arguments
+ // whose location is known unless they are shadowed by
+ // eval-introduced bindings. Generates no code for unsupported slot
+ // types and therefore expects to fall through to the slow jump target.
+ void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow,
+ JumpTarget* done);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 2e371207e0..4e186d1382 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "constants-arm.h"
@@ -128,3 +130,5 @@ int Registers::Number(const char* name) {
} } // namespace assembler::arm
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index d50c2038aa..3d3e6ae9d4 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -32,6 +32,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "cpu.h"
#include "macro-assembler.h"
@@ -136,3 +138,5 @@ void CPU::DebugBreak() {
}
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index d02ba764f8..69fc504e7f 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "debug.h"
@@ -170,10 +172,11 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
// -- lr : return address
- // -- sp[0] : key
- // -- sp[4] : receiver
- Generate_DebugBreakCallHelper(masm, 0);
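+  // The register mask lists the registers holding tagged values (value, key
+  // and receiver) so the helper can preserve them across the debug break.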
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
}
@@ -237,3 +240,5 @@ const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 4051096fca..0ac7d19f66 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -56,6 +56,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "constants-arm.h"
#include "disasm.h"
#include "macro-assembler.h"
@@ -1356,3 +1358,5 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
} // namespace disasm
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 5dedc29ab9..48eaf46aaf 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "scopes.h"
@@ -236,3 +238,5 @@ void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 0cb7f12302..271e4a6f0a 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -27,12 +27,10 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "frames-inl.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2-inl.h"
-#else
#include "arm/assembler-arm-inl.h"
-#endif
namespace v8 {
@@ -121,3 +119,5 @@ Address InternalFrame::GetCallerStackPointer() const {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 6680af9a97..c2f6ea96bd 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -397,10 +399,10 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kValue: {
Label done;
__ bind(materialize_true);
- __ mov(result_register(), Operand(Factory::true_value()));
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(materialize_false);
- __ mov(result_register(), Operand(Factory::false_value()));
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ bind(&done);
switch (location_) {
case kAccumulator:
@@ -417,7 +419,7 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kValueTest:
__ bind(materialize_true);
- __ mov(result_register(), Operand(Factory::true_value()));
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
switch (location_) {
case kAccumulator:
break;
@@ -430,7 +432,7 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTestValue:
__ bind(materialize_false);
- __ mov(result_register(), Operand(Factory::false_value()));
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
switch (location_) {
case kAccumulator:
break;
@@ -640,11 +642,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
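+  // The keyed store IC now takes its operands in registers (r0: value,
+  // r1: key, r2: receiver) instead of on the stack.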
+ __ pop(r1); // Key.
+ __ pop(r2); // Receiver.
__ Call(ic, RelocInfo::CODE_TARGET);
- // Value in r0 is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
+ // Value in r0 is ignored (declarations are statements).
}
}
}
@@ -661,19 +663,29 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
- // Build the shared function info and instantiate the function based
- // on it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(expr, script(), this);
- if (HasStackOverflow()) return;
- // Create a new closure.
- __ mov(r0, Operand(function_info));
- __ stm(db_w, sp, cp.bit() | r0.bit());
- __ CallRuntime(Runtime::kNewClosure, 2);
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && info->num_literals() == 0) {
+ FastNewClosureStub stub;
+ __ mov(r0, Operand(info));
+ __ push(r0);
+ __ CallStub(&stub);
+ } else {
+ __ mov(r0, Operand(info));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ }
Apply(context_, r0);
}
@@ -695,13 +707,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
- // object on the stack.
+ // object (receiver) in r0.
__ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndApply(1, context, r0);
+ Apply(context, r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Comment cmnt(masm_, "Lookup slot");
@@ -904,7 +915,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() != Token::INIT_CONST);
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -984,6 +1001,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op(),
context_);
break;
case NAMED_PROPERTY:
@@ -1000,7 +1018,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
- __ ldr(r0, MemOperand(sp, 0));
+  // Call the load IC. It takes the receiver in r0 and the property name in r2.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
@@ -1024,14 +1042,13 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op,
Expression::Context context) {
- // Three main cases: global variables, lookup slots, and all other
- // types of slots. Left-hand-side parameters that rewrite to
- // explicit property accesses do not reach here.
+ // Left-hand sides that rewrite to explicit property accesses do not reach
+ // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
- Slot* slot = var->slot();
if (var->is_global()) {
ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
@@ -1042,43 +1059,61 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- __ push(result_register()); // Value.
- __ mov(r1, Operand(var->name()));
- __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
-
- } else if (var->slot() != NULL) {
+ } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+ // Perform the assignment for non-const variables and for initialization
+ // of const variables. Const assignments are simply skipped.
+ Label done;
Slot* slot = var->slot();
switch (slot->type()) {
- case Slot::LOCAL:
case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (op == Token::INIT_CONST) {
+ // Detect const reinitialization by checking for the hole value.
+ __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &done);
+ }
+ // Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
+ if (op == Token::INIT_CONST) {
+ // Detect const reinitialization by checking for the hole value.
+ __ ldr(r1, target);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &done);
+ }
+ // Perform the assignment and issue the write barrier.
__ str(result_register(), target);
-
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-
__ mov(r2, Operand(offset));
__ RecordWrite(r1, r2, r3);
break;
}
case Slot::LOOKUP:
- UNREACHABLE();
+ // Call the runtime for the assignment. The runtime will ignore
+ // const reinitialization.
+ __ push(r0); // Value.
+ __ mov(r0, Operand(slot->var()->name()));
+ __ Push(cp, r0); // Context and name.
+ if (op == Token::INIT_CONST) {
+ // The runtime will ignore const redeclaration.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
break;
}
-
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
+ __ bind(&done);
}
+
Apply(context, result_register());
}
@@ -1103,6 +1138,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load the receiver into r1. Leave a copy on the stack if it is needed for
+  // turning the receiver into a fast-case object.
if (expr->ends_initialization_block()) {
__ ldr(r1, MemOperand(sp));
} else {
@@ -1115,7 +1152,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(r0); // Result of assignment, saved even if not needed.
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ // Receiver is under the result value.
+ __ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
@@ -1143,21 +1181,30 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
+ __ pop(r1); // Key.
+  // Load the receiver into r2. Leave a copy on the stack if it is needed for
+  // turning the receiver into a fast-case object.
+ if (expr->ends_initialization_block()) {
+ __ ldr(r2, MemOperand(sp));
+ } else {
+ __ pop(r2);
+ }
+
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ // Receiver is under the result value.
+ __ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
+ DropAndApply(1, context_, r0);
+ } else {
+ Apply(context_, r0);
}
-
- // Receiver and key are still on stack.
- DropAndApply(2, context_, r0);
}
@@ -1165,14 +1212,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- // Evaluate receiver.
- VisitForValue(expr->obj(), kStack);
-
if (key->IsPropertyName()) {
+ VisitForValue(expr->obj(), kAccumulator);
EmitNamedPropertyLoad(expr);
- // Drop receiver left on the stack by IC.
- DropAndApply(1, context_, r0);
+ Apply(context_, r0);
} else {
+ VisitForValue(expr->obj(), kStack);
VisitForValue(expr->key(), kAccumulator);
__ pop(r1);
EmitKeyedPropertyLoad(expr);
@@ -1445,13 +1490,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
__ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
__ mov(r2, Operand(proxy->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
- __ str(r0, MemOperand(sp));
+ __ push(r0);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
proxy->var()->slot()->type() == Slot::LOOKUP) {
@@ -1557,10 +1601,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(ip, Operand(Smi::FromInt(0)));
__ push(ip);
}
- VisitForValue(prop->obj(), kStack);
if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(r0);
EmitNamedPropertyLoad(prop);
} else {
+ VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kAccumulator);
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
@@ -1631,6 +1678,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case VARIABLE:
if (expr->is_postfix()) {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN,
Expression::kEffect);
// For all contexts except kEffect: We have the result on
// top of the stack.
@@ -1639,6 +1687,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN,
context_);
}
break;
@@ -1657,15 +1706,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
+ __ pop(r1); // Key.
+ __ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
- DropAndApply(2, context_, r0);
+ Apply(context_, r0);
}
break;
}
@@ -1877,3 +1927,5 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 34ba5e5f78..ba318fd2ec 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -27,7 +27,10 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "assembler-arm.h"
+#include "codegen.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
@@ -639,7 +642,9 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
// Patch the map check.
Address ldr_map_instr_address =
- inline_end_address - 18 * Assembler::kInstrSize;
+ inline_end_address -
+ (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
+ Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
@@ -669,7 +674,9 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
// Patch the map check.
Address ldr_map_instr_address =
- inline_end_address - 5 * Assembler::kInstrSize;
+ inline_end_address -
+ (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
+ Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
@@ -1204,13 +1211,13 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
// -- lr : return address
- // -- sp[0] : key
- // -- sp[1] : receiver
// -----------------------------------
- __ ldm(ia, sp, r2.bit() | r3.bit());
- __ Push(r3, r2, r0);
+ // Push receiver, key and value for runtime call.
+ __ Push(r2, r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
@@ -1220,12 +1227,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
// -- lr : return address
- // -- sp[0] : key
- // -- sp[1] : receiver
// -----------------------------------
- __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
- __ Push(r3, r1, r0);
+
+ // Push receiver, key and value for runtime call.
+ __ Push(r2, r1, r0);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
@@ -1234,147 +1242,135 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
// -- lr : return address
- // -- sp[0] : key
- // -- sp[1] : receiver
// -----------------------------------
- Label slow, fast, array, extra, exit, check_pixel_array;
+ Label slow, fast, array, extra, check_pixel_array;
+
+ // Register usage.
+ Register value = r0;
+ Register key = r1;
+ Register receiver = r2;
+ Register elements = r3; // Elements array of the receiver.
+ // r4 and r5 are used as general scratch registers.
- // Get the key and the object from the stack.
- __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(key, Operand(kSmiTagMask));
__ b(ne, &slow);
// Check that the object isn't a smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(receiver, Operand(kSmiTagMask));
__ b(eq, &slow);
// Get the map of the object.
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_ARRAY_TYPE));
- // r1 == key.
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JS object.
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
-
// Object case: Check key against length in the elements array.
- __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r2, ip);
+ __ cmp(r4, ip);
__ b(ne, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+ __ mov(r4, Operand(key, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
- __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ cmp(r1, Operand(ip));
+ __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(r4, Operand(ip));
__ b(lo, &fast);
-
- // Slow case:
+  // Slow case: fall back to the runtime.
__ bind(&slow);
+ // Entry registers are intact.
+ // r0: value.
+ // r1: key.
+ // r2: receiver.
GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
- // r0: value
- // r1: index (as a smi), zero-extended.
- // r3: elements array
+ // r4: elements map.
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(r2, ip);
+ __ cmp(r4, ip);
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
- __ BranchOnNotSmi(r0, &slow);
- __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key.
- __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
- __ cmp(r1, Operand(ip));
+ __ BranchOnNotSmi(value, &slow);
+ __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
+ __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
+ __ cmp(r4, Operand(ip));
__ b(hs, &slow);
- __ mov(r4, r0); // Save the value.
- __ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
+ __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
{ // Clamp the value to [0..255].
Label done;
- __ tst(r0, Operand(0xFFFFFF00));
+ __ tst(r5, Operand(0xFFFFFF00));
__ b(eq, &done);
- __ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative.
- __ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive.
+ __ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative.
+ __ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive.
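+    // The tst above set the flags from the untagged value & 0xFFFFFF00, so
+    // mi/pl pick 0 for negative values and 255 for values above 255.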
__ bind(&done);
}
- __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
- __ strb(r0, MemOperand(r2, r1));
- __ mov(r0, Operand(r4)); // Return the original value.
+ // Get the pointer to the external array. This clobbers elements.
+ __ ldr(elements,
+ FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+ __ strb(r5, MemOperand(elements, r4)); // Elements is now external array.
__ Ret();
-
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
- // r0 == value, r1 == key, r2 == elements, r3 == object
__ bind(&extra);
- __ b(ne, &slow); // do not leave holes in the array
- __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag
- __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
- __ cmp(r1, Operand(ip));
+ // Condition code from comparing key and array length is still available.
+  __ b(ne, &slow);  // Only support writing to array[array.length].
+ // Check for room in the elements backing store.
+ __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
+ __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(r4, Operand(ip));
__ b(hs, &slow);
- __ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag
- __ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment
- __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
- __ mov(r3, Operand(r2));
- // NOTE: Computing the address to store into must take the fact
- // that the key has been incremented into account.
- int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
- ((1 << kSmiTagSize) * 2);
- __ add(r2, r2, Operand(displacement));
- __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ // Calculate key + 1 as smi.
+ ASSERT_EQ(0, kSmiTag);
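+  // With kSmiTag == 0 the increment can be done on the tagged values directly:
+  // Smi(k) + Smi(1) == Smi(k + 1).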
+ __ add(r4, key, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ b(&fast);
-
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is the
// length is always a smi.
- // r0 == value, r3 == object
__ bind(&array);
- __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r1, ip);
+ __ cmp(r4, ip);
__ b(ne, &slow);
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ ldr(r1, MemOperand(sp)); // restore key
- // r0 == value, r1 == key, r2 == elements, r3 == object.
- __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
- __ cmp(r1, Operand(ip));
+ // Check the key against the length in the array.
+ __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ cmp(key, Operand(ip));
__ b(hs, &extra);
- __ mov(r3, Operand(r2));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
-
+ // Fall through to fast case.
- // Fast case: Do the store.
- // r0 == value, r2 == address to store into, r3 == elements
__ bind(&fast);
- __ str(r0, MemOperand(r2));
+ // Fast case, store the value to the elements backing store.
+ __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(r5));
// Skip write barrier if the written value is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ tst(value, Operand(kSmiTagMask));
+ __ Ret(eq);
// Update write barrier for the elements array address.
- __ sub(r1, r2, Operand(r3));
- __ RecordWrite(r3, r1, r2);
+ __ sub(r4, r5, Operand(elements));
+ __ RecordWrite(elements, r4, r5);
- __ bind(&exit);
__ Ret();
}
@@ -1468,20 +1464,23 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
// -- lr : return address
- // -- sp[0] : key
- // -- sp[1] : receiver
// -----------------------------------
Label slow, check_heap_number;
- // Get the key and the object from the stack.
- __ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver
+ // Register usage.
+ Register value = r0;
+ Register key = r1;
+ Register receiver = r2;
+ // r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
- __ BranchOnSmi(r2, &slow);
+ __ BranchOnSmi(receiver, &slow);
- // Check that the object is a JS object. Load map into r3
- __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is a JS object. Load map into r3.
+ __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
@@ -1491,73 +1490,70 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ b(ne, &slow);
// Check that the key is a smi.
- __ BranchOnNotSmi(r1, &slow);
+ __ BranchOnNotSmi(key, &slow);
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // r0: value
- // r1: index (smi)
- // r2: object
- __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ // Check that the elements array is the appropriate type of ExternalArray.
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r3, ip);
+ __ cmp(r4, ip);
__ b(ne, &slow);
// Check that the index is in range.
- __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index.
- __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
- __ cmp(r1, ip);
+ __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
+ __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+ __ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
- // r0: value
- // r1: index (integer)
- // r2: array
- __ BranchOnNotSmi(r0, &check_heap_number);
- __ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
- __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
-
- // r1: index (integer)
- // r2: base pointer of external storage
- // r3: value (integer)
+ // r3: external array.
+ // r4: key (integer).
+ __ BranchOnNotSmi(value, &check_heap_number);
+ __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+ // r3: base pointer of external storage.
+ // r4: key (integer).
+ // r5: value (integer).
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ strb(r3, MemOperand(r2, r1, LSL, 0));
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ strh(r3, MemOperand(r2, r1, LSL, 1));
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ str(r3, MemOperand(r2, r1, LSL, 2));
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
- ConvertIntToFloat(masm, r3, r4, r5, r6);
- __ str(r4, MemOperand(r2, r1, LSL, 2));
+ ConvertIntToFloat(masm, r5, r6, r7, r9);
+ __ str(r6, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
- // r0: value
+  // Entry registers are intact; r0 holds the value, which is the return value.
__ Ret();
- // r0: value
- // r1: index (integer)
- // r2: external array object
+ // r3: external array.
+ // r4: index (integer).
__ bind(&check_heap_number);
- __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+ // r3: base pointer of external storage.
+ // r4: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
@@ -1567,13 +1563,13 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r3, HeapNumber::kValueOffset);
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
__ vcvt_f32_f64(s0, d0);
- __ vmov(r3, s0);
- __ str(r3, MemOperand(r2, r1, LSL, 2));
+ __ vmov(r5, s0);
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
Label done;
@@ -1582,38 +1578,38 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ vcmp(d0, d0);
// Move vector status bits to normal status bits.
__ vmrs(v8::internal::pc);
- __ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0
+ __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
__ b(vs, &done);
- // Test whether exponent equal to 0x7FF (infinity or NaN)
- __ vmov(r4, r3, d0);
+ // Test whether exponent equal to 0x7FF (infinity or NaN).
+ __ vmov(r6, r7, d0);
__ mov(r5, Operand(0x7FF00000));
- __ and_(r3, r3, Operand(r5));
- __ teq(r3, Operand(r5));
- __ mov(r3, Operand(0), LeaveCC, eq);
+ __ and_(r6, r6, Operand(r5));
+ __ teq(r6, Operand(r5));
+ __ mov(r6, Operand(0), LeaveCC, eq);
- // Not infinity or NaN simply convert to int
+    // Not infinity or NaN, so simply convert to int.
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, ne);
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
- __ vmov(r3, s0, ne);
+ __ vmov(r5, s0, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ strb(r3, MemOperand(r2, r1, LSL, 0));
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ strh(r3, MemOperand(r2, r1, LSL, 1));
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ str(r3, MemOperand(r2, r1, LSL, 2));
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
@@ -1621,12 +1617,12 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
}
- // r0: original value
+  // Entry registers are intact; r0 holds the value, which is the return value.
__ Ret();
} else {
- // VFP3 is not available do manual conversions
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ // VFP3 is not available do manual conversions.
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
@@ -1638,106 +1634,108 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
- __ mov(r5, Operand(HeapNumber::kExponentMask));
- __ and_(r6, r3, Operand(r5), SetCC);
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
- __ teq(r6, Operand(r5));
- __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
+ __ teq(r9, Operand(r7));
+ __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
- __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
- __ add(r6,
- r6,
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ add(r9,
+ r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
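+      // Doubles use an exponent bias of 1023 while binary32 floats use 127, so
+      // the exponent is rebiased before being packed into the float format.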
- __ cmp(r6, Operand(kBinary32MaxExponent));
- __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
+ __ cmp(r9, Operand(kBinary32MaxExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+ __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
- __ cmp(r6, Operand(kBinary32MinExponent));
- __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+ __ cmp(r9, Operand(kBinary32MinExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
- __ and_(r7, r3, Operand(HeapNumber::kSignMask));
- __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
- __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
- __ str(r3, MemOperand(r2, r1, LSL, 2));
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+      // Entry registers are intact; r0 holds the value, which is the return
+      // value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r3, Operand(HeapNumber::kSignMask));
- __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
- __ orr(r6, r6, r7);
- __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
- __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r9, r9, r7);
+ __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
- bool is_signed_type = IsElementTypeSigned(array_type);
+ bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
- __ mov(r5, Operand(HeapNumber::kExponentMask));
- __ and_(r6, r3, Operand(r5), SetCC);
- __ mov(r3, Operand(0), LeaveCC, eq);
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ mov(r5, Operand(0), LeaveCC, eq);
__ b(eq, &done);
- __ teq(r6, Operand(r5));
- __ mov(r3, Operand(0), LeaveCC, eq);
+ __ teq(r9, Operand(r7));
+ __ mov(r5, Operand(0), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
- __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
- __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative than result is 0.
- __ mov(r3, Operand(0), LeaveCC, mi);
+ __ mov(r5, Operand(0), LeaveCC, mi);
__ b(mi, &done);
- // If exponent is too big than result is minimal value
- __ cmp(r6, Operand(meaningfull_bits - 1));
- __ mov(r3, Operand(min_value), LeaveCC, ge);
+    // If the exponent is too big then the result is the minimal value.
+ __ cmp(r9, Operand(meaningfull_bits - 1));
+ __ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
- __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
- __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
- __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
+ __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
- __ rsb(r6, r6, Operand(0));
- __ mov(r3, Operand(r3, LSL, r6));
- __ rsb(r6, r6, Operand(meaningfull_bits));
- __ orr(r3, r3, Operand(r4, LSR, r6));
+ __ rsb(r9, r9, Operand(0));
+ __ mov(r5, Operand(r5, LSL, r9));
+ __ rsb(r9, r9, Operand(meaningfull_bits));
+ __ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
- __ teq(r5, Operand(0));
- __ rsb(r3, r3, Operand(0), LeaveCC, ne);
+ __ teq(r7, Operand(0));
+ __ rsb(r5, r5, Operand(0), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ strb(r3, MemOperand(r2, r1, LSL, 0));
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ strh(r3, MemOperand(r2, r1, LSL, 1));
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ str(r3, MemOperand(r2, r1, LSL, 2));
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
@@ -1748,6 +1746,11 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
+
+ // Entry registers are intact.
+ // r0: value
+ // r1: key
+ // r2: receiver
GenerateRuntimeSetProperty(masm);
}
@@ -1838,3 +1841,5 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc
index a13de0e2a6..3c43d168d8 100644
--- a/deps/v8/src/arm/jump-target-arm.cc
+++ b/deps/v8/src/arm/jump-target-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
@@ -47,28 +49,15 @@ void JumpTarget::DoJump() {
// which are still live in the C++ code.
ASSERT(cgen()->HasValidEntryRegisters());
- if (is_bound()) {
- // Backward jump. There already a frame expectation at the target.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->MergeTo(entry_frame_);
+ if (entry_frame_set_) {
+    // There is already a frame expectation at the target.
+ cgen()->frame()->MergeTo(&entry_frame_);
cgen()->DeleteFrame();
} else {
- // Use the current frame as the expected one at the target if necessary.
- if (entry_frame_ == NULL) {
- entry_frame_ = cgen()->frame();
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- } else {
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- }
-
- // The predicate is_linked() should be made true. Its implementation
- // detects the presence of a frame pointer in the reaching_frames_ list.
- if (!is_linked()) {
- reaching_frames_.Add(NULL);
- ASSERT(is_linked());
- }
+ // Clone the current frame to use as the expected one at the target.
+ set_entry_frame(cgen()->frame());
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
}
__ jmp(&entry_label_);
}
@@ -77,23 +66,19 @@ void JumpTarget::DoJump() {
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
+ if (entry_frame_set_) {
// Backward branch. We have an expected frame to merge to on the
// backward edge.
- cgen()->frame()->MergeTo(entry_frame_);
- } else {
- // Clone the current frame to use as the expected one at the target if
- // necessary.
- if (entry_frame_ == NULL) {
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- // The predicate is_linked() should be made true. Its implementation
- // detects the presence of a frame pointer in the reaching_frames_ list.
- if (!is_linked()) {
- reaching_frames_.Add(NULL);
- ASSERT(is_linked());
+ if (cc == al) {
+ cgen()->frame()->MergeTo(&entry_frame_);
+ } else {
+ // We can't do conditional merges yet so you have to ensure that all
+ // conditional branches to the JumpTarget have the same virtual frame.
+ ASSERT(cgen()->frame()->Equals(&entry_frame_));
}
+ } else {
+ // Clone the current frame to use as the expected one at the target.
+ set_entry_frame(cgen()->frame());
}
__ b(cc, &entry_label_);
}
@@ -113,15 +98,10 @@ void JumpTarget::Call() {
// Calls are always 'forward' so we use a copy of the current frame (plus
// one for a return address) as the expected frame.
- ASSERT(entry_frame_ == NULL);
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- entry_frame_ = target_frame;
-
- // The predicate is_linked() should now be made true. Its implementation
- // detects the presence of a frame pointer in the reaching_frames_ list.
- reaching_frames_.Add(NULL);
- ASSERT(is_linked());
+ ASSERT(!entry_frame_set_);
+ VirtualFrame target_frame = *cgen()->frame();
+ target_frame.Adjust(1);
+ set_entry_frame(&target_frame);
__ bl(&entry_label_);
}
@@ -136,77 +116,27 @@ void JumpTarget::DoBind() {
if (cgen()->has_valid_frame()) {
// If there is a current frame we can use it on the fall through.
- if (entry_frame_ == NULL) {
- entry_frame_ = new VirtualFrame(cgen()->frame());
+ if (!entry_frame_set_) {
+ entry_frame_ = *cgen()->frame();
+ entry_frame_set_ = true;
} else {
- ASSERT(cgen()->frame()->Equals(entry_frame_));
+ cgen()->frame()->MergeTo(&entry_frame_);
}
} else {
// If there is no current frame we must have an entry frame which we can
// copy.
- ASSERT(entry_frame_ != NULL);
+ ASSERT(entry_frame_set_);
RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // The predicate is_linked() should be made false. Its implementation
- // detects the presence (or absence) of frame pointers in the
- // reaching_frames_ list. If we inserted a bogus frame to make
- // is_linked() true, remove it now.
- if (is_linked()) {
- reaching_frames_.Clear();
+ cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
}
__ bind(&entry_label_);
}
-void BreakTarget::Jump() {
- // On ARM we do not currently emit merge code for jumps, so we need to do
- // it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->Drop(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- UNIMPLEMENTED();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even
- // on the fall through. This is so we can bind the return target
- // with state on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- // On ARM we do not currently emit merge code at binding sites, so we need
- // to do it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- cgen()->frame()->Drop(count);
- }
-
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
- UNIMPLEMENTED();
-}
-
-
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index c4b153f82e..e356d55e1b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
@@ -1725,3 +1727,5 @@ void CodePatcher::Emit(Address addr) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 64fe5d69c1..e8910f4860 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "unicode.h"
#include "log.h"
#include "ast.h"
@@ -1255,3 +1258,5 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/register-allocator-arm.cc b/deps/v8/src/arm/register-allocator-arm.cc
index ad0c7f9d46..3b35574da3 100644
--- a/deps/v8/src/arm/register-allocator-arm.cc
+++ b/deps/v8/src/arm/register-allocator-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
@@ -57,3 +59,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e4601f3e3f..e72a8796dc 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -29,6 +29,8 @@
#include <cstdarg>
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "disasm.h"
#include "assembler.h"
#include "arm/constants-arm.h"
@@ -2731,3 +2733,5 @@ uintptr_t Simulator::PopAddress() {
} } // namespace assembler::arm
#endif // __arm__
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 877354ccae..8001cd842a 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
@@ -506,8 +508,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ASSERT(callback->getter() != NULL);
Label cleanup;
- __ pop(scratch2);
- __ Push(receiver, scratch2);
+ __ push(receiver);
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
lookup->holder(), scratch1,
@@ -526,9 +527,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
- __ pop(scratch1);
__ pop(scratch2);
- __ push(scratch1);
}
}
@@ -1618,15 +1617,11 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
- // -- r2 : name
+ // -- r0 : receiver
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- // Load receiver.
- __ ldr(r0, MemOperand(sp, 0));
-
// Check that receiver is not a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
@@ -1663,14 +1658,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
int index,
String* name) {
// ----------- S t a t e -------------
+ // -- r0 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- __ ldr(r0, MemOperand(sp, 0));
-
GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1685,13 +1678,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
+ // -- r0 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- __ ldr(r0, MemOperand(sp, 0));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
callback, name, &miss, &failure);
@@ -1710,14 +1702,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Object* value,
String* name) {
// ----------- S t a t e -------------
+ // -- r0 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- __ ldr(r0, MemOperand(sp, 0));
-
GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1731,14 +1721,12 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- r0 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- __ ldr(r0, MemOperand(sp, 0));
-
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,
@@ -1764,10 +1752,9 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
+ // -- r0 : receiver
// -- r2 : name
// -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Label miss;
@@ -1974,32 +1961,31 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
- // -- r2 : name
+ // -- r1 : key
+ // -- r2 : receiver
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+ __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
// Check that the name has not changed.
- __ cmp(r2, Operand(Handle<String>(name)));
+ __ cmp(r1, Operand(Handle<String>(name)));
__ b(ne, &miss);
- // Load receiver from the stack.
- __ ldr(r3, MemOperand(sp));
- // r1 is used as scratch register, r3 and r2 might be clobbered.
+ // r3 is used as a scratch register. r1 and r2 keep their values if a jump
+ // to the miss label is generated.
GenerateStoreField(masm(),
object,
index,
transition,
- r3, r2, r1,
+ r2, r1, r3,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
- __ mov(r2, Operand(Handle<String>(name))); // restore name register.
+ __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2153,3 +2139,5 @@ Object* ConstructStubCompiler::CompileConstructStub(
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/virtual-frame-arm-inl.h b/deps/v8/src/arm/virtual-frame-arm-inl.h
new file mode 100644
index 0000000000..a97cde4f75
--- /dev/null
+++ b/deps/v8/src/arm/virtual-frame-arm-inl.h
@@ -0,0 +1,53 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
+#define V8_VIRTUAL_FRAME_ARM_INL_H_
+
+#include "assembler-arm.h"
+#include "virtual-frame-arm.h"
+
+namespace v8 {
+namespace internal {
+
+// Inline VirtualFrame methods, moved here from virtual-frame-arm.h now that
+// a virtual-frame-arm-inl.h file exists.
+MemOperand VirtualFrame::ParameterAt(int index) {
+ // Index -1 corresponds to the receiver.
+ ASSERT(-1 <= index); // -1 is the receiver.
+ ASSERT(index <= parameter_count());
+ return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
+}
+
+// The receiver frame slot.
+MemOperand VirtualFrame::Receiver() {
+ return ParameterAt(-1);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_VIRTUAL_FRAME_ARM_INL_H_
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 0ec6e203d2..3acd2df478 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_ARM)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@@ -72,8 +74,15 @@ void VirtualFrame::PopToR0() {
void VirtualFrame::MergeTo(VirtualFrame* expected) {
if (Equals(expected)) return;
+ MergeTOSTo(expected->top_of_stack_state_);
+ ASSERT(register_allocation_map_ == expected->register_allocation_map_);
+}
+
+
+void VirtualFrame::MergeTOSTo(
+ VirtualFrame::TopOfStack expected_top_of_stack_state) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
- switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
+ switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
@@ -154,7 +163,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
UNREACHABLE();
#undef CASE_NUMBER
}
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
+ top_of_stack_state_ = expected_top_of_stack_state;
}
@@ -300,7 +309,8 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- SpillAllButCopyTOSToR0();
+ PopToR0();
+ SpillAll();
__ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
@@ -330,8 +340,10 @@ void VirtualFrame::CallKeyedLoadIC() {
void VirtualFrame::CallKeyedStoreIC() {
- ASSERT(SpilledScope::is_spilled());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ PopToR1R0();
+ SpillAll();
+ EmitPop(r2);
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
@@ -418,7 +430,7 @@ void VirtualFrame::Pop() {
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(!is_used(reg));
+ ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ pop(reg);
} else {
@@ -498,36 +510,40 @@ Register VirtualFrame::Peek() {
void VirtualFrame::Dup() {
- AssertIsNotSpilled();
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- top_of_stack_state_ = R0_TOS;
- break;
- case R0_TOS:
- __ mov(r1, r0);
- // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ mov(r0, r1);
- // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_R1_TOS:
- __ push(r1);
- __ mov(r1, r0);
- // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ push(r0);
- __ mov(r0, r1);
- // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- default:
- UNREACHABLE();
+ if (SpilledScope::is_spilled()) {
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ } else {
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
+ __ ldr(r0, MemOperand(sp, 0));
+ top_of_stack_state_ = R0_TOS;
+ break;
+ case R0_TOS:
+ __ mov(r1, r0);
+ // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+ top_of_stack_state_ = R0_R1_TOS;
+ break;
+ case R1_TOS:
+ __ mov(r0, r1);
+ // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+ top_of_stack_state_ = R0_R1_TOS;
+ break;
+ case R0_R1_TOS:
+ __ push(r1);
+ __ mov(r1, r0);
+ // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+ top_of_stack_state_ = R0_R1_TOS;
+ break;
+ case R1_R0_TOS:
+ __ push(r0);
+ __ mov(r0, r1);
+ // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+ top_of_stack_state_ = R0_R1_TOS;
+ break;
+ default:
+ UNREACHABLE();
+ }
}
element_count_++;
}
@@ -576,7 +592,6 @@ Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
- AssertIsNotSpilled();
element_count_--;
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
@@ -628,6 +643,39 @@ void VirtualFrame::EmitPush(Register reg) {
}
+void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
+ if (this_far_down == 0) {
+ Pop();
+ Register dest = GetTOSRegister();
+ if (dest.is(reg)) {
+ // We already popped one item off the top of the stack. If the only
+ // free register is the one we were asked to push then we have been
+ // asked to push a register that was already in use, which cannot
+ // happen. It therefore follows that there are two free TOS registers:
+ ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
+ dest = dest.is(r0) ? r1 : r0;
+ }
+ __ mov(dest, reg);
+ EmitPush(dest);
+ } else if (this_far_down == 1) {
+ int virtual_elements = kVirtualElements[top_of_stack_state_];
+ if (virtual_elements < 2) {
+ __ str(reg, ElementAt(this_far_down));
+ } else {
+ ASSERT(virtual_elements == 2);
+ ASSERT(!reg.is(r0));
+ ASSERT(!reg.is(r1));
+ Register dest = kBottomRegister[top_of_stack_state_];
+ __ mov(dest, reg);
+ }
+ } else {
+ ASSERT(this_far_down >= 2);
+ ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
+ __ str(reg, ElementAt(this_far_down));
+ }
+}
+
+
Register VirtualFrame::GetTOSRegister() {
if (SpilledScope::is_spilled()) return r0;
@@ -710,3 +758,5 @@ void VirtualFrame::SpillAll() {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index b255929e2b..9471d61e1b 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -29,11 +29,14 @@
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
-#include "scopes.h"
namespace v8 {
namespace internal {
+// This dummy class is only used to create invalid virtual frames.
+extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
+
+
// -------------------------------------------------------------------------
// Virtual frames
//
@@ -82,26 +85,8 @@ class VirtualFrame : public ZoneObject {
// is not spilled, ie. where register allocation occurs. Eventually
// when RegisterAllocationScope is ubiquitous it can be removed
// along with the (by then unused) SpilledScope class.
- explicit RegisterAllocationScope(CodeGenerator* cgen)
- : cgen_(cgen),
- old_is_spilled_(SpilledScope::is_spilled_) {
- SpilledScope::is_spilled_ = false;
- if (old_is_spilled_) {
- VirtualFrame* frame = cgen->frame();
- if (frame != NULL) {
- frame->AssertIsSpilled();
- }
- }
- }
- ~RegisterAllocationScope() {
- SpilledScope::is_spilled_ = old_is_spilled_;
- if (old_is_spilled_) {
- VirtualFrame* frame = cgen_->frame();
- if (frame != NULL) {
- frame->SpillAll();
- }
- }
- }
+ inline explicit RegisterAllocationScope(CodeGenerator* cgen);
+ inline ~RegisterAllocationScope();
private:
CodeGenerator* cgen_;
@@ -116,19 +101,20 @@ class VirtualFrame : public ZoneObject {
// Construct an initial virtual frame on entry to a JS function.
inline VirtualFrame();
+ // Construct an invalid virtual frame, used by JumpTargets.
+ inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
+
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- MacroAssembler* masm() { return cgen()->masm(); }
+ inline CodeGenerator* cgen();
+ inline MacroAssembler* masm();
// The number of elements on the virtual frame.
int element_count() { return element_count_; }
// The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
+ inline int height();
bool is_used(int num) {
switch (num) {
@@ -160,10 +146,6 @@ class VirtualFrame : public ZoneObject {
}
}
- bool is_used(Register reg) {
- return is_used(RegisterAllocator::ToNumber(reg));
- }
-
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
// emitted.
@@ -247,16 +229,13 @@ class VirtualFrame : public ZoneObject {
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
- AssertIsSpilled();
- return MemOperand(sp, index * kPointerSize);
+ int adjusted_index = index - kVirtualElements[top_of_stack_state_];
+ ASSERT(adjusted_index >= 0);
+ return MemOperand(sp, adjusted_index * kPointerSize);
}
// A frame-allocated local as an assembly operand.
- MemOperand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return MemOperand(fp, kLocal0Offset - index * kPointerSize);
- }
+ inline MemOperand LocalAt(int index);
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
@@ -268,26 +247,17 @@ class VirtualFrame : public ZoneObject {
MemOperand Context() { return MemOperand(fp, kContextOffset); }
// A parameter as an assembly operand.
- MemOperand ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
- }
+ inline MemOperand ParameterAt(int index);
// The receiver frame slot.
- MemOperand Receiver() { return ParameterAt(-1); }
+ inline MemOperand Receiver();
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- void CallStub(CodeStub* stub, int arg_count) {
- if (arg_count != 0) Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- masm()->CallStub(stub);
- }
+ inline void CallStub(CodeStub* stub, int arg_count);
// Call JS function from top of the stack with arguments
// taken from the stack.
@@ -308,7 +278,8 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
- // Call load IC. Receiver is on the stack. Result is returned in r0.
+ // Call load IC. Receiver is on the stack and is consumed. Result is returned
+ // in r0.
void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
// Call store IC. If the load is contextual, value is found on top of the
@@ -320,8 +291,8 @@ class VirtualFrame : public ZoneObject {
// Result is returned in r0.
void CallKeyedLoadIC();
- // Call keyed store IC. Key and receiver are on the stack and the value is in
- // r0. Result is returned in r0.
+ // Call keyed store IC. Value, key and receiver are on the stack. All three
+ // are consumed. Result is returned in r0.
void CallKeyedStoreIC();
// Call into an IC stub given the number of arguments it removes
@@ -386,6 +357,12 @@ class VirtualFrame : public ZoneObject {
void EmitPush(MemOperand operand);
void EmitPushRoot(Heap::RootListIndex index);
+ // Overwrite the nth thing on the stack. If the nth position is in a
+ // register then this turns into a mov, otherwise an str. Afterwards
+ // you can still use the register even if it is a register that can be
+ // used for TOS (r0 or r1).
+ void SetElementAt(Register reg, int this_far_down);
+
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
Register GetTOSRegister();
@@ -449,13 +426,13 @@ class VirtualFrame : public ZoneObject {
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
+ inline int parameter_count();
+ inline int local_count();
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
- int frame_pointer() { return parameter_count() + 3; }
+ inline int frame_pointer();
// The index of the first parameter. The receiver lies below the first
// parameter.
@@ -463,26 +440,22 @@ class VirtualFrame : public ZoneObject {
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
- int context_index() { return frame_pointer() - 1; }
+ inline int context_index();
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
- int function_index() { return frame_pointer() - 2; }
+ inline int function_index();
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
- int local0_index() { return frame_pointer() + 2; }
+ inline int local0_index();
// The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
+ inline int expression_base_index();
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
+ inline int fp_relative(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
@@ -494,10 +467,13 @@ class VirtualFrame : public ZoneObject {
// onto the physical stack and made free.
void EnsureOneFreeTOSRegister();
+ // Emit instructions to get the top of stack state from where we are to where
+ // we want to be.
+ void MergeTOSTo(TopOfStack expected_state);
+
inline bool Equals(VirtualFrame* other);
friend class JumpTarget;
- friend class DeferredCode;
};
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 87f363b0c0..871ca86eda 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -424,8 +424,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc";
case RelocInfo::EMBEDDED_OBJECT:
return "embedded object";
- case RelocInfo::EMBEDDED_STRING:
- return "embedded string";
case RelocInfo::CONSTRUCT_CALL:
return "code target (js construct call)";
case RelocInfo::CODE_TARGET_CONTEXT:
@@ -508,7 +506,6 @@ void RelocInfo::Verify() {
ASSERT(code->address() == HeapObject::cast(found)->address());
break;
}
- case RelocInfo::EMBEDDED_STRING:
case RUNTIME_ENTRY:
case JS_RETURN:
case COMMENT:
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 5d03c1f854..f2a6c8be39 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -121,7 +121,6 @@ class RelocInfo BASE_EMBEDDED {
DEBUG_BREAK,
CODE_TARGET, // code target which is not any of the above.
EMBEDDED_OBJECT,
- EMBEDDED_STRING,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
@@ -137,7 +136,7 @@ class RelocInfo BASE_EMBEDDED {
NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
NONE, // never recorded
LAST_CODE_ENUM = CODE_TARGET,
- LAST_GCED_ENUM = EMBEDDED_STRING
+ LAST_GCED_ENUM = EMBEDDED_OBJECT
};
@@ -185,6 +184,11 @@ class RelocInfo BASE_EMBEDDED {
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
+ // Is the pointer this relocation info refers to coded like a plain pointer
+ // or is it strange in some way (eg relative or patched into a series of
+ // instructions).
+ bool IsCodedSpecially();
+
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
diff --git a/deps/v8/src/ast-inl.h b/deps/v8/src/ast-inl.h
new file mode 100644
index 0000000000..2b5d7c472b
--- /dev/null
+++ b/deps/v8/src/ast-inl.h
@@ -0,0 +1,79 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
+ : labels_(labels), type_(type) {
+ ASSERT(labels == NULL || labels->length() > 0);
+}
+
+
+SwitchStatement::SwitchStatement(ZoneStringList* labels)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+ tag_(NULL), cases_(NULL) {
+}
+
+
+IterationStatement::IterationStatement(ZoneStringList* labels)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
+}
+
+
+Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
+ : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+ statements_(capacity),
+ is_initializer_block_(is_initializer_block) {
+}
+
+
+ForStatement::ForStatement(ZoneStringList* labels)
+ : IterationStatement(labels),
+ init_(NULL),
+ cond_(NULL),
+ next_(NULL),
+ may_have_function_literal_(true),
+ loop_variable_(NULL),
+ peel_this_loop_(false) {
+}
+
+
+ForInStatement::ForInStatement(ZoneStringList* labels)
+ : IterationStatement(labels), each_(NULL), enumerable_(NULL) {
+}
+
+
+DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
+ : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 75b2945d9c..92df990063 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -32,6 +32,8 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
+#include "ast-inl.h"
+#include "jump-target-inl.h"
namespace v8 {
namespace internal {
@@ -786,6 +788,13 @@ Block::Block(Block* other, ZoneList<Statement*>* statements)
}
+WhileStatement::WhileStatement(ZoneStringList* labels)
+ : IterationStatement(labels),
+ cond_(NULL),
+ may_have_function_literal_(true) {
+}
+
+
ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
Expression* expression)
: Statement(other), expression_(expression) {}
@@ -809,6 +818,11 @@ IterationStatement::IterationStatement(IterationStatement* other,
: BreakableStatement(other), body_(body) {}
+CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
+ : label_(label), statements_(statements) {
+}
+
+
ForStatement::ForStatement(ForStatement* other,
Statement* init,
Expression* cond,
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index dfc08ee071..a3a97341dd 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -351,10 +351,7 @@ class BreakableStatement: public Statement {
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
protected:
- BreakableStatement(ZoneStringList* labels, Type type)
- : labels_(labels), type_(type) {
- ASSERT(labels == NULL || labels->length() > 0);
- }
+ inline BreakableStatement(ZoneStringList* labels, Type type);
explicit BreakableStatement(BreakableStatement* other);
@@ -367,10 +364,7 @@ class BreakableStatement: public Statement {
class Block: public BreakableStatement {
public:
- Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
- : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity),
- is_initializer_block_(is_initializer_block) { }
+ inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
// Construct a clone initialized from the original block and
// a deep copy of all statements of the original block.
@@ -437,8 +431,7 @@ class IterationStatement: public BreakableStatement {
BreakTarget* continue_target() { return &continue_target_; }
protected:
- explicit IterationStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
+ explicit inline IterationStatement(ZoneStringList* labels);
// Construct a clone initialized from original and
// a deep copy of the original body.
@@ -456,9 +449,7 @@ class IterationStatement: public BreakableStatement {
class DoWhileStatement: public IterationStatement {
public:
- explicit DoWhileStatement(ZoneStringList* labels)
- : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
- }
+ explicit inline DoWhileStatement(ZoneStringList* labels);
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
@@ -482,11 +473,7 @@ class DoWhileStatement: public IterationStatement {
class WhileStatement: public IterationStatement {
public:
- explicit WhileStatement(ZoneStringList* labels)
- : IterationStatement(labels),
- cond_(NULL),
- may_have_function_literal_(true) {
- }
+ explicit WhileStatement(ZoneStringList* labels);
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
@@ -511,14 +498,7 @@ class WhileStatement: public IterationStatement {
class ForStatement: public IterationStatement {
public:
- explicit ForStatement(ZoneStringList* labels)
- : IterationStatement(labels),
- init_(NULL),
- cond_(NULL),
- next_(NULL),
- may_have_function_literal_(true),
- loop_variable_(NULL),
- peel_this_loop_(false) {}
+ explicit inline ForStatement(ZoneStringList* labels);
// Construct a for-statement initialized from another for-statement
// and deep copies of all parts of the original statement.
@@ -574,8 +554,7 @@ class ForStatement: public IterationStatement {
class ForInStatement: public IterationStatement {
public:
- explicit ForInStatement(ZoneStringList* labels)
- : IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
+ explicit inline ForInStatement(ZoneStringList* labels);
void Initialize(Expression* each, Expression* enumerable, Statement* body) {
IterationStatement::Initialize(body);
@@ -691,8 +670,7 @@ class WithExitStatement: public Statement {
class CaseClause: public ZoneObject {
public:
- CaseClause(Expression* label, ZoneList<Statement*>* statements)
- : label_(label), statements_(statements) { }
+ CaseClause(Expression* label, ZoneList<Statement*>* statements);
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@@ -711,9 +689,7 @@ class CaseClause: public ZoneObject {
class SwitchStatement: public BreakableStatement {
public:
- explicit SwitchStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
- tag_(NULL), cases_(NULL) { }
+ explicit inline SwitchStatement(ZoneStringList* labels);
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index df1e98a66b..087413118f 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1753,8 +1753,8 @@ Genesis::Genesis(Handle<Object> global_object,
CreateNewGlobals(global_template, global_object, &inner_global);
HookUpGlobalProxy(inner_global, global_proxy);
InitializeGlobal(inner_global, empty_function);
- if (!InstallNatives()) return;
InstallJSFunctionResultCaches();
+ if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 4971275792..9a0fbd2704 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -330,22 +330,19 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
}
-static bool ArrayPrototypeHasNoElements() {
+static bool ArrayPrototypeHasNoElements(Context* global_context,
+ JSObject* array_proto) {
// This method depends on non writability of Object and Array prototype
// fields.
- Context* global_context = Top::context()->global_context();
- // Array.prototype
- JSObject* proto =
- JSObject::cast(global_context->array_function()->prototype());
- if (proto->elements() != Heap::empty_fixed_array()) return false;
+ if (array_proto->elements() != Heap::empty_fixed_array()) return false;
// Hidden prototype
- proto = JSObject::cast(proto->GetPrototype());
- ASSERT(proto->elements() == Heap::empty_fixed_array());
+ array_proto = JSObject::cast(array_proto->GetPrototype());
+ ASSERT(array_proto->elements() == Heap::empty_fixed_array());
// Object.prototype
- proto = JSObject::cast(proto->GetPrototype());
- if (proto != global_context->initial_object_prototype()) return false;
- if (proto->elements() != Heap::empty_fixed_array()) return false;
- ASSERT(proto->GetPrototype()->IsNull());
+ array_proto = JSObject::cast(array_proto->GetPrototype());
+ if (array_proto != global_context->initial_object_prototype()) return false;
+ if (array_proto->elements() != Heap::empty_fixed_array()) return false;
+ ASSERT(array_proto->GetPrototype()->IsNull());
return true;
}
@@ -368,6 +365,18 @@ static bool IsJSArrayWithFastElements(Object* receiver,
}
+static bool IsFastElementMovingAllowed(Object* receiver,
+ FixedArray** elements) {
+ if (!IsJSArrayWithFastElements(receiver, elements)) return false;
+
+ Context* global_context = Top::context()->global_context();
+ JSObject* array_proto =
+ JSObject::cast(global_context->array_function()->prototype());
+ if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false;
+ return ArrayPrototypeHasNoElements(global_context, array_proto);
+}
+
+
static Object* CallJsBuiltin(const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope;
@@ -465,11 +474,7 @@ BUILTIN(ArrayPop) {
return top;
}
- // Remember to check the prototype chain.
- JSFunction* array_function =
- Top::context()->global_context()->array_function();
- JSObject* prototype = JSObject::cast(array_function->prototype());
- top = prototype->GetElement(len - 1);
+ top = array->GetPrototype()->GetElement(len - 1);
return top;
}
@@ -478,8 +483,7 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
- if (!IsJSArrayWithFastElements(receiver, &elms)
- || !ArrayPrototypeHasNoElements()) {
+ if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArrayShift", args);
}
JSArray* array = JSArray::cast(receiver);
@@ -515,8 +519,7 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
- if (!IsJSArrayWithFastElements(receiver, &elms)
- || !ArrayPrototypeHasNoElements()) {
+ if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArrayUnshift", args);
}
JSArray* array = JSArray::cast(receiver);
@@ -565,8 +568,7 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
- if (!IsJSArrayWithFastElements(receiver, &elms)
- || !ArrayPrototypeHasNoElements()) {
+ if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArraySlice", args);
}
JSArray* array = JSArray::cast(receiver);
@@ -635,8 +637,7 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
- if (!IsJSArrayWithFastElements(receiver, &elms)
- || !ArrayPrototypeHasNoElements()) {
+ if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArraySplice", args);
}
JSArray* array = JSArray::cast(receiver);
@@ -788,7 +789,10 @@ BUILTIN(ArraySplice) {
BUILTIN(ArrayConcat) {
- if (!ArrayPrototypeHasNoElements()) {
+ Context* global_context = Top::context()->global_context();
+ JSObject* array_proto =
+ JSObject::cast(global_context->array_function()->prototype());
+ if (!ArrayPrototypeHasNoElements(global_context, array_proto)) {
return CallJsBuiltin("ArrayConcat", args);
}
@@ -798,7 +802,8 @@ BUILTIN(ArrayConcat) {
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
- if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) {
+ if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
+ || JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin("ArrayConcat", args);
}
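
The builtins.cc hunks above narrow the fast-path conditions for the array builtins: besides Array.prototype and Object.prototype having no elements, the receiver (and, for ArrayConcat, every argument) must still have the unmodified Array.prototype as its prototype, otherwise the builtin falls back to CallJsBuiltin. A minimal JavaScript sketch of cases that now take the fallback; it is illustrative only and not part of the patch (print is the d8 shell function):

// Elements on Array.prototype must stay observable, so the fast path bails out.
Array.prototype[0] = "inherited";
var a = [];
a.length = 1;
print(a.shift());            // "inherited", served by the generic JS builtin

// An array with a replaced prototype no longer inherits from the standard
// Array.prototype, so IsFastElementMovingAllowed() rejects it.
var b = [1, 2, 3];
b.__proto__ = { 3: "shadow" };
b.splice(0, 1);              // handled by CallJsBuiltin("ArraySplice", args)

// ArrayConcat additionally checks every argument's prototype.
[1, 2].concat(b);            // also routed through the JS implementation
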
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index a5bb31f141..358c6fccd3 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -28,7 +28,6 @@
#ifndef V8_CODEGEN_H_
#define V8_CODEGEN_H_
-#include "ast.h"
#include "code-stubs.h"
#include "runtime.h"
#include "type-info.h"
@@ -115,7 +114,7 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
- F(RandomHeapNumber, 0, 1) \
+ F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 901f2186a0..27d4835dcf 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -120,7 +120,21 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
- if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+ bool force_full_compiler = false;
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+ // On ia32 the full compiler can compile all code, whereas on the other
+ // platforms the supported constructs are checked by the associated syntax
+ // checker. When --always-full-compiler is used on ia32 the syntax checker is
+ // still in effect, but there is a special flag --force-full-compiler to
+ // bypass the syntax checker completely and use the full compiler for all
+ // code. Also, when debugging on ia32 the full compiler is used for all code.
+ force_full_compiler =
+ Debugger::IsDebuggerActive() || FLAG_force_full_compiler;
+#endif
+
+ if (force_full_compiler) {
+ return FullCodeGenerator::MakeCode(info);
+ } else if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
FullCodeGenSyntaxChecker checker;
checker.Check(function);
if (checker.has_supported_syntax()) {
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index e454a9a93c..9ef6841dc6 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -54,7 +54,7 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->AddAlias(alias, start);
+ code_map->AddAlias(start, entry, code_start);
}
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index ed3f6925e0..52a891f925 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -141,13 +141,15 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
- Address start) {
+ Address start,
+ int security_token_id) {
CodeEventsContainer evt_rec;
CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
rec->type = CodeEventRecord::CODE_ALIAS;
rec->order = ++enqueue_order_;
- rec->alias = alias;
- rec->start = start;
+ rec->start = alias;
+ rec->entry = generator_->NewCodeEntry(security_token_id);
+ rec->code_start = start;
events_buffer_.Enqueue(evt_rec);
}
@@ -257,26 +259,30 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
}
-CpuProfile* CpuProfiler::StopProfiling(String* title) {
- return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
+CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
+ return is_profiling() ?
+ singleton_->StopCollectingProfile(security_token, title) : NULL;
}
int CpuProfiler::GetProfilesCount() {
ASSERT(singleton_ != NULL);
- return singleton_->profiles_->profiles()->length();
+ // The count of profiles doesn't depend on a security token.
+ return singleton_->profiles_->Profiles(CodeEntry::kNoSecurityToken)->length();
}
-CpuProfile* CpuProfiler::GetProfile(int index) {
+CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
ASSERT(singleton_ != NULL);
- return singleton_->profiles_->profiles()->at(index);
+ const int token = singleton_->token_enumerator_->GetTokenId(security_token);
+ return singleton_->profiles_->Profiles(token)->at(index);
}
-CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
ASSERT(singleton_ != NULL);
- return singleton_->profiles_->GetProfile(uid);
+ const int token = singleton_->token_enumerator_->GetTokenId(security_token);
+ return singleton_->profiles_->GetProfile(token, uid);
}
@@ -348,8 +354,15 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
+ int security_token_id = CodeEntry::kNoSecurityToken;
+ if (function->unchecked_context()->IsContext()) {
+ security_token_id = singleton_->token_enumerator_->GetTokenId(
+ function->context()->global_context()->security_token());
+ }
singleton_->processor_->FunctionCreateEvent(
- function->address(), function->code()->address());
+ function->address(),
+ function->code()->address(),
+ security_token_id);
}
@@ -388,12 +401,14 @@ void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
CpuProfiler::CpuProfiler()
: profiles_(new CpuProfilesCollection()),
next_profile_uid_(1),
+ token_enumerator_(new TokenEnumerator()),
generator_(NULL),
processor_(NULL) {
}
CpuProfiler::~CpuProfiler() {
+ delete token_enumerator_;
delete profiles_;
}
@@ -438,7 +453,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
- CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
+ CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken,
+ title,
+ actual_sampling_rate);
if (result != NULL) {
result->Print();
}
@@ -446,10 +463,12 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
}
-CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
+CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
+ String* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
- return profiles_->StopProfiling(title, actual_sampling_rate);
+ int token = token_enumerator_->GetTokenId(security_token);
+ return profiles_->StopProfiling(token, title, actual_sampling_rate);
}
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 35d8d5e060..a51133d424 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -41,7 +41,7 @@ class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
-
+class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@@ -94,8 +94,9 @@ class CodeDeleteEventRecord : public CodeEventRecord {
class CodeAliasEventRecord : public CodeEventRecord {
public:
- Address alias;
Address start;
+ CodeEntry* entry;
+ Address code_start;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -151,7 +152,7 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
- void FunctionCreateEvent(Address alias, Address start);
+ void FunctionCreateEvent(Address alias, Address start, int security_token_id);
void FunctionMoveEvent(Address from, Address to);
void FunctionDeleteEvent(Address from);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
@@ -212,10 +213,10 @@ class CpuProfiler {
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
- static CpuProfile* StopProfiling(String* title);
+ static CpuProfile* StopProfiling(Object* security_token, String* title);
static int GetProfilesCount();
- static CpuProfile* GetProfile(int index);
- static CpuProfile* FindProfile(unsigned uid);
+ static CpuProfile* GetProfile(Object* security_token, int index);
+ static CpuProfile* FindProfile(Object* security_token, unsigned uid);
// Invoked from stack sampler (thread or signal handler.)
static TickSample* TickSampleEvent();
@@ -252,11 +253,12 @@ class CpuProfiler {
void StartCollectingProfile(String* title);
void StartProcessorIfNotStarted();
CpuProfile* StopCollectingProfile(const char* title);
- CpuProfile* StopCollectingProfile(String* title);
+ CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile();
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
+ TokenEnumerator* token_enumerator_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index b9ff09cee9..5c3da13a67 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -341,6 +341,11 @@ function DebugRequest(cmd_line) {
this.request_ = this.breakCommandToJSONRequest_(args);
break;
+ case 'breakpoints':
+ case 'bb':
+ this.request_ = this.breakpointsCommandToJSONRequest_(args);
+ break;
+
case 'clear':
this.request_ = this.clearCommandToJSONRequest_(args);
break;
@@ -770,6 +775,15 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
};
+DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) {
+ if (args && args.length > 0) {
+ throw new Error('Unexpected arguments.');
+ }
+ var request = this.createRequest('listbreakpoints');
+ return request.toJSONProtocol();
+};
+
+
// Create a JSON request for the clear command.
DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
// Build a evaluate request from the text command.
@@ -947,6 +961,39 @@ function DebugResponseDetails(response) {
result += body.breakpoint;
details.text = result;
break;
+
+ case 'listbreakpoints':
+ result = 'breakpoints: (' + body.breakpoints.length + ')';
+ for (var i = 0; i < body.breakpoints.length; i++) {
+ var breakpoint = body.breakpoints[i];
+ result += '\n id=' + breakpoint.number;
+ result += ' type=' + breakpoint.type;
+ if (breakpoint.script_id) {
+ result += ' script_id=' + breakpoint.script_id;
+ }
+ if (breakpoint.script_name) {
+ result += ' script_name=' + breakpoint.script_name;
+ }
+ result += ' line=' + breakpoint.line;
+ if (breakpoint.column != null) {
+ result += ' column=' + breakpoint.column;
+ }
+ if (breakpoint.groupId) {
+ result += ' groupId=' + breakpoint.groupId;
+ }
+ if (breakpoint.ignoreCount) {
+ result += ' ignoreCount=' + breakpoint.ignoreCount;
+ }
+ if (breakpoint.active === false) {
+ result += ' inactive';
+ }
+ if (breakpoint.condition) {
+ result += ' condition=' + breakpoint.condition;
+ }
+ result += ' hit_count=' + breakpoint.hit_count;
+ }
+ details.text = result;
+ break;
case 'backtrace':
if (body.totalFrames == 0) {
@@ -1136,8 +1183,8 @@ function DebugResponseDetails(response) {
default:
details.text =
- 'Response for unknown command \'' + response.command + '\'' +
- ' (' + json_response + ')';
+ 'Response for unknown command \'' + response.command() + '\'' +
+ ' (' + response.raw_json() + ')';
}
} catch (e) {
details.text = 'Error: "' + e + '" formatting response';
@@ -1153,6 +1200,7 @@ function DebugResponseDetails(response) {
* @constructor
*/
function ProtocolPackage(json) {
+ this.raw_json_ = json;
this.packet_ = JSON.parse(json);
this.refs_ = [];
if (this.packet_.refs) {
@@ -1243,6 +1291,11 @@ ProtocolPackage.prototype.lookup = function(handle) {
}
+ProtocolPackage.prototype.raw_json = function() {
+ return this.raw_json_;
+}
+
+
function ProtocolValue(value, packet) {
this.value_ = value;
this.packet_ = packet;
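
The d8.js changes above add a breakpoints command (alias bb) that issues a listbreakpoints request over the debugger JSON protocol and prints one line per breakpoint from the response; they also keep the raw JSON around via ProtocolPackage.raw_json for the unknown-command case. A rough sketch of the request that breakpointsCommandToJSONRequest_ builds; the seq number is assigned by the shell and is shown here only for illustration:

// Approximate wire format of createRequest('listbreakpoints').toJSONProtocol().
var request = {
  "seq": 117,
  "type": "request",
  "command": "listbreakpoints"
};
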
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index b9e19d68ed..e780cb86a5 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -238,7 +238,15 @@ function LocalTime(time) {
return time + DaylightSavingsOffset(time) + local_time_offset;
}
+
+var ltcache = {
+ key: null,
+ val: null
+};
+
function LocalTimeNoCheck(time) {
+ var ltc = ltcache;
+ if (%_ObjectEquals(time, ltc.key)) return ltc.val;
if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
return $NaN;
}
@@ -252,7 +260,8 @@ function LocalTimeNoCheck(time) {
} else {
var dst_offset = DaylightSavingsOffset(time);
}
- return time + local_time_offset + dst_offset;
+ ltc.key = time;
+ return (ltc.val = time + local_time_offset + dst_offset);
}
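
The date.js hunk adds a single-entry cache so that converting the same time value twice in a row skips the DST and offset computation. A minimal sketch of the same memoization pattern in plain JavaScript; expensiveConvert stands in for the real offset computation and is not V8 code, and plain === approximates the %_ObjectEquals intrinsic used above:

// Single-entry memoization: remember only the most recent key/value pair.
var cache = { key: null, val: null };

function memoizedConvert(time) {
  if (time === cache.key) return cache.val;      // hit: reuse the last result
  cache.key = time;
  return (cache.val = expensiveConvert(time));   // miss: compute and remember
}

function expensiveConvert(time) {
  // Placeholder for DaylightSavingsOffset(time) + local_time_offset.
  return time;
}
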
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index e94cee41d2..77fa1ddd65 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -1266,6 +1266,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.clearBreakPointRequest_(request, response);
} else if (request.command == 'clearbreakpointgroup') {
this.clearBreakPointGroupRequest_(request, response);
+ } else if (request.command == 'listbreakpoints') {
+ this.listBreakpointsRequest_(request, response);
} else if (request.command == 'backtrace') {
this.backtraceRequest_(request, response);
} else if (request.command == 'frame') {
@@ -1581,6 +1583,35 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, resp
response.body = { breakpoint: break_point }
}
+DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
+ var array = [];
+ for (var i = 0; i < script_break_points.length; i++) {
+ var break_point = script_break_points[i];
+
+ var description = {
+ number: break_point.number(),
+ line: break_point.line(),
+ column: break_point.column(),
+ groupId: break_point.groupId(),
+ hit_count: break_point.hit_count(),
+ active: break_point.active(),
+ condition: break_point.condition(),
+ ignoreCount: break_point.ignoreCount()
+ }
+
+ if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
+ description.type = 'scriptId';
+ description.script_id = break_point.script_id();
+ } else {
+ description.type = 'scriptName';
+ description.script_name = break_point.script_name();
+ }
+ array.push(description);
+ }
+
+ response.body = { breakpoints: array }
+}
+
DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
// Get the number of frames.
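
listBreakpointsRequest_ above builds one descriptor per entry in script_break_points, and the d8 formatter added earlier in this patch walks the same fields. An example of the response body it assembles, with all concrete values invented for illustration:

// Illustrative 'listbreakpoints' response body; the values are made up.
var exampleBody = {
  breakpoints: [
    {
      number: 1,
      line: 42,
      column: 2,
      groupId: null,
      hit_count: 3,
      active: true,
      condition: null,
      ignoreCount: 0,
      type: 'scriptName',      // or 'scriptId' together with a script_id field
      script_name: 'app.js'
    }
  ]
};
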
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index bf1f893b7d..8cb95efd06 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -72,6 +72,17 @@ static Handle<Code> ComputeCallDebugPrepareStepIn(int argc) {
}
+static v8::Handle<v8::Context> GetDebugEventContext() {
+ Handle<Context> context = Debug::debugger_entry()->GetContext();
+ // Top::context() may have been NULL when the "script collected" event occurred.
+ if (*context == NULL) {
+ return v8::Local<v8::Context>();
+ }
+ Handle<Context> global_context(context->global_context());
+ return v8::Utils::ToLocal(global_context);
+}
+
+
BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
BreakLocatorType type) {
debug_info_ = debug_info;
@@ -2112,12 +2123,14 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
if (event_listener_->IsProxy()) {
// C debug event listener.
Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
- v8::Debug::EventCallback callback =
- FUNCTION_CAST<v8::Debug::EventCallback>(callback_obj->proxy());
- callback(event,
- v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
- v8::Utils::ToLocal(event_data),
- v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_)));
+ v8::Debug::EventCallback2 callback =
+ FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+ EventDetailsImpl event_details(
+ event,
+ Handle<JSObject>::cast(exec_state),
+ event_data,
+ event_listener_data_);
+ callback(event_details);
} else {
// JavaScript debug event listener.
ASSERT(event_listener_->IsJSFunction());
@@ -2643,14 +2656,10 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- Handle<Context> context = Debug::debugger_entry()->GetContext();
- // Top::context() may have been NULL when "script collected" event occured.
- if (*context == NULL) {
- ASSERT(event_ == v8::ScriptCollected);
- return v8::Local<v8::Context>();
- }
- Handle<Context> global_context(context->global_context());
- return v8::Utils::ToLocal(global_context);
+ v8::Handle<v8::Context> context = GetDebugEventContext();
+ // Top::context() may be NULL when the "script collected" event occurs.
+ ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
+ return GetDebugEventContext();
}
@@ -2659,6 +2668,41 @@ v8::Debug::ClientData* MessageImpl::GetClientData() const {
}
+EventDetailsImpl::EventDetailsImpl(DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ Handle<Object> callback_data)
+ : event_(event),
+ exec_state_(exec_state),
+ event_data_(event_data),
+ callback_data_(callback_data) {}
+
+
+DebugEvent EventDetailsImpl::GetEvent() const {
+ return event_;
+}
+
+
+v8::Handle<v8::Object> EventDetailsImpl::GetExecutionState() const {
+ return v8::Utils::ToLocal(exec_state_);
+}
+
+
+v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
+ return v8::Utils::ToLocal(event_data_);
+}
+
+
+v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
+ return GetDebugEventContext();
+}
+
+
+v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
+ return v8::Utils::ToLocal(callback_data_);
+}
+
+
CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index e7ac94e319..e2eecb8bfb 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -524,6 +524,27 @@ class MessageImpl: public v8::Debug::Message {
};
+// Details of the debug event delivered to the debug event listener.
+class EventDetailsImpl : public v8::Debug::EventDetails {
+ public:
+ EventDetailsImpl(DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ Handle<Object> callback_data);
+ virtual DebugEvent GetEvent() const;
+ virtual v8::Handle<v8::Object> GetExecutionState() const;
+ virtual v8::Handle<v8::Object> GetEventData() const;
+ virtual v8::Handle<v8::Context> GetEventContext() const;
+ virtual v8::Handle<v8::Value> GetCallbackData() const;
+ private:
+ DebugEvent event_; // Debug event causing the break.
+ Handle<JSObject> exec_state_; // Current execution state.
+ Handle<JSObject> event_data_; // Data associated with the event.
+ Handle<Object> callback_data_; // User data passed with the callback when
+ // it was registered.
+};
+
+
// Message sent by user to v8 debugger or debugger output message.
// In addition to command text it may contain a pointer to some user data
// which are expected to be passed along with the command response to message
@@ -693,8 +714,9 @@ class Debugger {
static void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
static bool is_loading_debugger() { return Debugger::is_loading_debugger_; }
- private:
static bool IsDebuggerActive();
+
+ private:
static void ListenersChanged();
static Mutex* debugger_access_; // Mutex guarding debugger variables.
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 490a2c5408..c360508c17 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -100,10 +100,10 @@ private:
DEFINE_bool(debug_code, false,
"generate extra code (comments, assertions) for debugging")
DEFINE_bool(emit_branch_hints, false, "emit branch hints")
-DEFINE_bool(push_pop_elimination, true,
- "eliminate redundant push/pops in assembly code")
-DEFINE_bool(print_push_pop_elimination, false,
- "print elimination of redundant push/pops in assembly code")
+DEFINE_bool(peephole_optimization, true,
+ "perform peephole optimizations in assembly code")
+DEFINE_bool(print_peephole_optimization, false,
+ "print peephole optimizations in assembly code")
DEFINE_bool(enable_sse2, true,
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
@@ -149,6 +149,10 @@ DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+DEFINE_bool(force_full_compiler, false,
+ "force use of the dedicated run-once backend for all code")
+#endif
DEFINE_bool(always_fast_compiler, false,
"try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,
@@ -182,6 +186,11 @@ DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
"print one trace line following each garbage collection")
+DEFINE_bool(trace_gc_nvp, false,
+ "print one detailed trace line in name=value format "
+ "after each garbage collection")
+DEFINE_bool(print_cumulative_gc_stat, false,
+ "print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(collect_maps, true,
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 699a1e97d5..2ccbca87ef 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -760,11 +760,6 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
}
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
@@ -810,6 +805,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Visit(stmt->body());
__ bind(loop_statement.continue_target());
+
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
@@ -872,11 +868,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
}
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Comment cmnt(masm_, "[ TryCatchStatement");
SetStatementPosition(stmt);
@@ -995,12 +986,6 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
@@ -1034,6 +1019,24 @@ void FullCodeGenerator::VisitLiteral(Literal* expr) {
}
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<SharedFunctionInfo> function_info =
+ Compiler::BuildFunctionInfo(expr, script(), this);
+ if (HasStackOverflow()) return;
+ EmitNewClosure(function_info);
+}
+
+
+void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral* expr) {
+ Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+ EmitNewClosure(expr->shared_function_info());
+}
+
+
void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 96d0f3e7e6..c7d0093712 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,6 +31,7 @@
#include "v8.h"
#include "ast.h"
+#include "compiler.h"
namespace v8 {
namespace internal {
@@ -229,8 +230,6 @@ class FullCodeGenerator: public AstVisitor {
return stack_depth + kForInStackElementCount;
}
private:
- // TODO(lrn): Check that this value is correct when implementing
- // for-in.
static const int kForInStackElementCount = 5;
DISALLOW_COPY_AND_ASSIGN(ForIn);
};
@@ -258,12 +257,22 @@ class FullCodeGenerator: public AstVisitor {
// context.
void DropAndApply(int count, Expression::Context context, Register reg);
+ // Set up branch labels for a test expression.
+ void PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false);
+
// Emit code to convert pure control flow to a pair of labels into the
// result expected according to an expression context.
void Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false);
+ // Emit code to convert constant control flow (true or false) into
+ // the result expected according to an expression context.
+ void Apply(Expression::Context context, bool flag);
+
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
@@ -348,6 +357,12 @@ class FullCodeGenerator: public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
+ // Platform-specific code for a variable, constant, or function
+ // declaration. Functions have an initial value.
+ void EmitDeclaration(Variable* variable,
+ Variable::Mode mode,
+ FunctionLiteral* function);
+
// Platform-specific return sequence
void EmitReturnSequence(int position);
@@ -355,9 +370,48 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallWithStub(Call* expr);
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+
+ // Platform-specific code for inline runtime calls.
+ void EmitInlineRuntimeCall(CallRuntime* expr);
+ void EmitIsSmi(ZoneList<Expression*>* arguments);
+ void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
+ void EmitIsObject(ZoneList<Expression*>* arguments);
+ void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
+ void EmitIsFunction(ZoneList<Expression*>* arguments);
+ void EmitIsArray(ZoneList<Expression*>* arguments);
+ void EmitIsRegExp(ZoneList<Expression*>* arguments);
+ void EmitIsConstructCall(ZoneList<Expression*>* arguments);
+ void EmitObjectEquals(ZoneList<Expression*>* arguments);
+ void EmitArguments(ZoneList<Expression*>* arguments);
+ void EmitArgumentsLength(ZoneList<Expression*>* arguments);
+ void EmitClassOf(ZoneList<Expression*>* arguments);
+ void EmitValueOf(ZoneList<Expression*>* arguments);
+ void EmitSetValueOf(ZoneList<Expression*>* arguments);
+ void EmitNumberToString(ZoneList<Expression*>* arguments);
+ void EmitCharFromCode(ZoneList<Expression*>* arguments);
+ void EmitFastCharCodeAt(ZoneList<Expression*>* arguments);
+ void EmitStringCompare(ZoneList<Expression*>* arguments);
+ void EmitStringAdd(ZoneList<Expression*>* arguments);
+ void EmitLog(ZoneList<Expression*>* arguments);
+ void EmitRandomHeapNumber(ZoneList<Expression*>* arguments);
+ void EmitSubString(ZoneList<Expression*>* arguments);
+ void EmitRegExpExec(ZoneList<Expression*>* arguments);
+ void EmitMathPow(ZoneList<Expression*>* arguments);
+ void EmitMathSin(ZoneList<Expression*>* arguments);
+ void EmitMathCos(ZoneList<Expression*>* arguments);
+ void EmitMathSqrt(ZoneList<Expression*>* arguments);
+ void EmitCallFunction(ZoneList<Expression*>* arguments);
+ void EmitRegExpConstructResult(ZoneList<Expression*>* arguments);
+ void EmitSwapElements(ZoneList<Expression*>* arguments);
+ void EmitGetFromCache(ZoneList<Expression*>* arguments);
+
// Platform-specific code for loading variables.
void EmitVariableLoad(Variable* expr, Expression::Context context);
+ // Platform-specific support for allocating a new closure based on
+ // the given function info.
+ void EmitNewClosure(Handle<SharedFunctionInfo> info);
+
// Platform-specific support for compiling assignments.
// Load a value from a named property.
@@ -372,9 +426,15 @@ class FullCodeGenerator: public AstVisitor {
// of the stack and the right one in the accumulator.
void EmitBinaryOp(Token::Value op, Expression::Context context);
+ // Assign to the given expression as if via '='. The right-hand-side value
+ // is expected in the accumulator.
+ void EmitAssignment(Expression* expr);
+
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
- void EmitVariableAssignment(Variable* var, Expression::Context context);
+ void EmitVariableAssignment(Variable* var,
+ Token::Value op,
+ Expression::Context context);
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
@@ -385,6 +445,14 @@ class FullCodeGenerator: public AstVisitor {
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
+ // Helper for compare operations. Expects the null-value in a register.
+ void EmitNullCompare(bool strict,
+ Register obj,
+ Register null_const,
+ Label* if_true,
+ Label* if_false,
+ Register scratch);
+
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 981ea16d72..292d8d8040 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -59,6 +59,24 @@ namespace internal {
#error Host architecture was not detected as supported by v8
#endif
+// Target architecture detection. This may be set externally. If not, detect
+// in the same way as the host architecture, that is, target the native
+// environment as presented by the compiler.
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
+ !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_TARGET_ARCH_X64 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_TARGET_ARCH_IA32 1
+#elif defined(__ARMEL__)
+#define V8_TARGET_ARCH_ARM 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_TARGET_ARCH_MIPS 1
+#else
+#error Target architecture was not detected as supported by v8
+#endif
+#endif
+
// Check for supported combinations of host and target architectures.
#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
#error Target architecture ia32 is only supported on ia32 host
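
The block above lets the target architecture be forced externally (for example -DV8_TARGET_ARCH_ARM when building the ARM simulator on an ia32 host) and only falls back to host detection otherwise. A minimal stand-alone sketch of the same detect-unless-overridden pattern, with invented MYLIB_* names, not part of this patch:

// Illustrative only: mirrors the detect-unless-overridden pattern added to
// globals.h. Build with e.g. -DMYLIB_TARGET_ARM=1 to bypass auto-detection.
#include <cstdio>

#if !defined(MYLIB_TARGET_X64) && !defined(MYLIB_TARGET_IA32) && \
    !defined(MYLIB_TARGET_ARM)
#if defined(_M_X64) || defined(__x86_64__)
#define MYLIB_TARGET_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define MYLIB_TARGET_IA32 1
#elif defined(__ARMEL__)
#define MYLIB_TARGET_ARM 1
#else
#error Target architecture not supported
#endif
#endif

int main() {
#if defined(MYLIB_TARGET_X64)
  std::printf("target: x64\n");
#elif defined(MYLIB_TARGET_IA32)
  std::printf("target: ia32\n");
#else
  std::printf("target: arm\n");
#endif
  return 0;
}
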
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 0a276ca995..d554a3ba68 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -115,8 +115,11 @@ int Heap::external_allocation_limit_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0;
+int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;
+GCTracer* Heap::tracer_ = NULL;
+
int Heap::unflattened_strings_length_ = 0;
int Heap::always_allocate_scope_depth_ = 0;
@@ -130,6 +133,11 @@ int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG
+int GCTracer::alive_after_last_gc_ = 0;
+double GCTracer::last_gc_end_timestamp_ = 0.0;
+int GCTracer::max_gc_pause_ = 0;
+int GCTracer::max_alive_after_gc_ = 0;
+int GCTracer::min_in_mutator_ = kMaxInt;
int Heap::Capacity() {
if (!HasBeenSetup()) return 0;
@@ -570,7 +578,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
- GCTracer::ExternalScope scope(tracer);
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_prologue_callback_();
}
@@ -596,14 +604,16 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
} else {
+ tracer_ = tracer;
Scavenge();
+ tracer_ = NULL;
}
Counters::objs_since_last_young.Set(0);
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
- GCTracer::ExternalScope scope(tracer);
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
GlobalHandles::PostGarbageCollectionProcessing();
}
@@ -627,7 +637,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
ASSERT(!allocation_allowed_);
- GCTracer::ExternalScope scope(tracer);
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_epilogue_callback_();
}
VerifySymbolTable();
@@ -636,7 +646,11 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
- mc_count_++;
+ if (MarkCompactCollector::IsCompacting()) {
+ mc_count_++;
+ } else {
+ ms_count_++;
+ }
tracer->set_full_gc_count(mc_count_);
LOG(ResourceEvent("markcompact", "begin"));
@@ -1179,6 +1193,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
node->set_size(object_size);
*p = target;
+ tracer()->increment_promoted_objects_size(object_size);
return;
}
} else {
@@ -1214,6 +1229,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
(*p)->Iterate(&v);
#endif
}
+ tracer()->increment_promoted_objects_size(object_size);
return;
}
}
@@ -2064,7 +2080,7 @@ Object* Heap::AllocateSubString(String* buffer,
}
// Make an attempt to flatten the buffer to reduce access time.
- buffer->TryFlatten();
+ buffer = buffer->TryFlattenGetString();
Object* result = buffer->IsAsciiRepresentation()
? AllocateRawAsciiString(length, pretenure)
@@ -3760,6 +3776,17 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
+ if (FLAG_print_cumulative_gc_stat) {
+ PrintF("\n\n");
+ PrintF("gc_count=%d ", gc_count_);
+ PrintF("mark_sweep_count=%d ", ms_count_);
+ PrintF("mark_compact_count=%d ", mc_count_);
+ PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
+ PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
+ PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
+ PrintF("\n\n");
+ }
+
GlobalHandles::TearDown();
ExternalStringTable::TearDown();
@@ -4235,33 +4262,114 @@ void Heap::TracePathToGlobal() {
#endif
+static int CountTotalHolesSize() {
+ int holes_size = 0;
+ OldSpaces spaces;
+ for (OldSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ holes_size += space->Waste() + space->AvailableFree();
+ }
+ return holes_size;
+}
+
+
GCTracer::GCTracer()
: start_time_(0.0),
- start_size_(0.0),
- external_time_(0.0),
+ start_size_(0),
gc_count_(0),
full_gc_count_(0),
is_compacting_(false),
- marked_count_(0) {
+ marked_count_(0),
+ allocated_since_last_gc_(0),
+ spent_in_mutator_(0),
+ promoted_objects_size_(0) {
// These two fields reflect the state of the previous full collection.
// Set them before they are changed by the collector.
previous_has_compacted_ = MarkCompactCollector::HasCompacted();
previous_marked_count_ = MarkCompactCollector::previous_marked_count();
- if (!FLAG_trace_gc) return;
+ if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
- start_size_ = SizeOfHeapObjects();
+ start_size_ = Heap::SizeOfObjects();
+
+ for (int i = 0; i < Scope::kNumberOfScopes; i++) {
+ scopes_[i] = 0;
+ }
+
+ in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
+
+ allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
+
+ if (last_gc_end_timestamp_ > 0) {
+ spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
+ }
}
GCTracer::~GCTracer() {
- if (!FLAG_trace_gc) return;
// Printf ONE line iff flag is set.
- int time = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
- int external_time = static_cast<int>(external_time_);
- PrintF("%s %.1f -> %.1f MB, ",
- CollectorString(), start_size_, SizeOfHeapObjects());
- if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms.\n", time);
+ if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
+
+ bool first_gc = (last_gc_end_timestamp_ == 0);
+
+ alive_after_last_gc_ = Heap::SizeOfObjects();
+ last_gc_end_timestamp_ = OS::TimeCurrentMillis();
+
+ int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
+
+ // Update cumulative GC statistics if required.
+ if (FLAG_print_cumulative_gc_stat) {
+ max_gc_pause_ = Max(max_gc_pause_, time);
+ max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
+ if (!first_gc) {
+ min_in_mutator_ = Min(min_in_mutator_,
+ static_cast<int>(spent_in_mutator_));
+ }
+ }
+
+ if (!FLAG_trace_gc_nvp) {
+ int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
+
+ PrintF("%s %.1f -> %.1f MB, ",
+ CollectorString(),
+ static_cast<double>(start_size_) / MB,
+ SizeOfHeapObjects());
+
+ if (external_time > 0) PrintF("%d / ", external_time);
+ PrintF("%d ms.\n", time);
+ } else {
+ PrintF("pause=%d ", time);
+ PrintF("mutator=%d ",
+ static_cast<int>(spent_in_mutator_));
+
+ PrintF("gc=");
+ switch (collector_) {
+ case SCAVENGER:
+ PrintF("s");
+ break;
+ case MARK_COMPACTOR:
+ PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ PrintF(" ");
+
+ PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
+ PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
+ PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
+ PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+
+ PrintF("total_size_before=%d ", start_size_);
+ PrintF("total_size_after=%d ", Heap::SizeOfObjects());
+ PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
+ PrintF("holes_size_after=%d ", CountTotalHolesSize());
+
+ PrintF("allocated=%d ", allocated_since_last_gc_);
+ PrintF("promoted=%d ", promoted_objects_size_);
+
+ PrintF("\n");
+ }
#if defined(ENABLE_LOGGING_AND_PROFILING)
Heap::PrintShortHeapStatistics();
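
With --trace_gc_nvp the destructor above replaces the human-readable one-liner with a machine-parsable name=value record per collection. A minimal tool-side sketch for consuming such lines (illustrative only; assumes the key=value format printed above):

// Illustrative sketch: split a "key=value key=value ..." GC trace line into
// a map, following the format emitted by GCTracer::~GCTracer above.
#include <map>
#include <sstream>
#include <string>

std::map<std::string, std::string> ParseNvpLine(const std::string& line) {
  std::map<std::string, std::string> fields;
  std::istringstream stream(line);
  std::string token;
  while (stream >> token) {                  // tokens look like "pause=12"
    std::string::size_type eq = token.find('=');
    if (eq == std::string::npos) continue;   // skip malformed tokens
    fields[token.substr(0, eq)] = token.substr(eq + 1);
  }
  return fields;
}
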
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index b4af6d9c22..74e5a31b19 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -981,6 +981,8 @@ class Heap : public AllStatic {
static void ClearJSFunctionResultCaches();
+ static GCTracer* tracer() { return tracer_; }
+
private:
static int reserved_semispace_size_;
static int max_semispace_size_;
@@ -1020,6 +1022,7 @@ class Heap : public AllStatic {
static int PromotedExternalMemorySize();
static int mc_count_; // how many mark-compact collections happened
+ static int ms_count_; // how many mark-sweep collections happened
static int gc_count_; // how many gc happened
// Total length of the strings we failed to flatten since the last GC.
@@ -1223,6 +1226,8 @@ class Heap : public AllStatic {
SharedFunctionInfo* shared,
Object* prototype);
+ static GCTracer* tracer_;
+
// Initializes the number to string cache based on the max semispace size.
static Object* InitializeNumberStringCache();
@@ -1629,19 +1634,30 @@ class DisableAssertNoAllocation {
class GCTracer BASE_EMBEDDED {
public:
- // Time spent while in the external scope counts towards the
- // external time in the tracer and will be reported separately.
- class ExternalScope BASE_EMBEDDED {
+ class Scope BASE_EMBEDDED {
public:
- explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) {
+ enum ScopeId {
+ EXTERNAL,
+ MC_MARK,
+ MC_SWEEP,
+ MC_COMPACT,
+ kNumberOfScopes
+ };
+
+ Scope(GCTracer* tracer, ScopeId scope)
+ : tracer_(tracer),
+ scope_(scope) {
start_time_ = OS::TimeCurrentMillis();
}
- ~ExternalScope() {
- tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_;
+
+ ~Scope() {
+ ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
+ tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
}
private:
GCTracer* tracer_;
+ ScopeId scope_;
double start_time_;
};
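
The Scope helper above generalizes the old ExternalScope into an RAII timer keyed by a scope id, so external, mark, sweep and compact phases can each be attributed separately. The same pattern in isolation (illustrative sketch, not V8 code):

// Illustrative RAII scope timer: accumulates wall-clock milliseconds per
// phase into a shared array, mirroring GCTracer::Scope above.
#include <chrono>

enum Phase { kExternal, kMark, kSweep, kCompact, kNumberOfPhases };

struct PhaseTimes {
  double ms[kNumberOfPhases] = {0.0, 0.0, 0.0, 0.0};
};

class ScopedPhaseTimer {
 public:
  ScopedPhaseTimer(PhaseTimes* times, Phase phase)
      : times_(times), phase_(phase),
        start_(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start_;
    times_->ms[phase_] += elapsed.count();  // accumulate, do not overwrite
  }
 private:
  PhaseTimes* times_;
  Phase phase_;
  std::chrono::steady_clock::time_point start_;
};
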
@@ -1667,6 +1683,19 @@ class GCTracer BASE_EMBEDDED {
int marked_count() { return marked_count_; }
+ void increment_promoted_objects_size(int object_size) {
+ promoted_objects_size_ += object_size;
+ }
+
+ // Returns maximum GC pause.
+ static int get_max_gc_pause() { return max_gc_pause_; }
+
+ // Returns maximum size of objects alive after GC.
+ static int get_max_alive_after_gc() { return max_alive_after_gc_; }
+
+  // Returns the minimum interval between two consecutive collections.
+ static int get_min_in_mutator() { return min_in_mutator_; }
+
private:
// Returns a string matching the collector.
const char* CollectorString();
@@ -1677,12 +1706,9 @@ class GCTracer BASE_EMBEDDED {
}
double start_time_; // Timestamp set in the constructor.
- double start_size_; // Size of objects in heap set in constructor.
+ int start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
- // Keep track of the amount of time spent in external callbacks.
- double external_time_;
-
// A count (including this one, eg, the first collection is 1) of the
// number of garbage collections.
int gc_count_;
@@ -1706,6 +1732,38 @@ class GCTracer BASE_EMBEDDED {
// The count from the end of the previous full GC. Will be zero if there
// was no previous full GC.
int previous_marked_count_;
+
+ // Amounts of time spent in different scopes during GC.
+ double scopes_[Scope::kNumberOfScopes];
+
+  // Total amount of space either wasted or contained in one of the free lists
+ // before the current GC.
+ int in_free_list_or_wasted_before_gc_;
+
+ // Difference between space used in the heap at the beginning of the current
+ // collection and the end of the previous collection.
+ int allocated_since_last_gc_;
+
+  // Amount of time spent in the mutator, i.e. the time elapsed between the end
+  // of the previous collection and the beginning of the current one.
+ double spent_in_mutator_;
+
+ // Size of objects promoted during the current collection.
+ int promoted_objects_size_;
+
+ // Maximum GC pause.
+ static int max_gc_pause_;
+
+ // Maximum size of objects alive after GC.
+ static int max_alive_after_gc_;
+
+  // Minimum interval between two consecutive collections.
+ static int min_in_mutator_;
+
+ // Size of objects alive after last GC.
+ static int alive_after_last_gc_;
+
+ static double last_gc_end_timestamp_;
};
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 6dc584e62b..1d88220469 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -159,11 +159,6 @@ Immediate::Immediate(const ExternalReference& ext) {
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
-Immediate::Immediate(const char* s) {
- x_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
Immediate::Immediate(Label* internal_offset) {
x_ = reinterpret_cast<int32_t>(internal_offset);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 26e40b15bc..4690c67289 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -36,6 +36,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
@@ -160,6 +162,15 @@ const int RelocInfo::kApplyMask =
1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on IA32 means that it is a relative address, as used by
+ // branch instructions. These are also the ones that need changing when a
+ // code object moves.
+ return (1 << rmode_) & kApplyMask;
+}
+
+
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
for (int i = 0; i < instruction_count; i++) {
@@ -433,7 +444,7 @@ void Assembler::push(const Operand& src) {
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
- if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+ if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
// (last_pc_ != NULL) is rolled into the above check.
// If a last_pc_ is set, we need to make sure that there has not been any
// relocation information generated between the last instruction and this
@@ -443,7 +454,7 @@ void Assembler::pop(Register dst) {
int push_reg_code = instr & 0x7;
if (push_reg_code == dst.code()) {
pc_ = last_pc_;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
}
} else {
@@ -452,7 +463,7 @@ void Assembler::pop(Register dst) {
Register src = { push_reg_code };
EnsureSpace ensure_space(this);
emit_operand(dst, Operand(src));
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
}
}
@@ -466,7 +477,7 @@ void Assembler::pop(Register dst) {
last_pc_[0] = 0x8b;
last_pc_[1] = op1;
last_pc_ = NULL;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
}
return;
@@ -483,7 +494,7 @@ void Assembler::pop(Register dst) {
last_pc_[1] = 0xc4;
last_pc_[2] = 0x04;
last_pc_ = NULL;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
}
return;
@@ -498,7 +509,7 @@ void Assembler::pop(Register dst) {
// change to
// 31c0 xor eax,eax
last_pc_ = NULL;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
}
return;
@@ -521,7 +532,7 @@ void Assembler::pop(Register dst) {
// b8XX000000 mov eax,0x000000XX
}
last_pc_ = NULL;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
}
return;
@@ -533,7 +544,7 @@ void Assembler::pop(Register dst) {
last_pc_ = NULL;
// change to
// b8XXXXXXXX mov eax,0xXXXXXXXX
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
}
return;
@@ -776,6 +787,13 @@ void Assembler::rep_stos() {
}
+void Assembler::stos() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xAB);
+}
+
+
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -813,7 +831,7 @@ void Assembler::add(Register dst, const Operand& src) {
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
- if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+ if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
byte instr = last_pc_[0];
if ((instr & 0xf8) == 0x50) {
// Last instruction was a push. Check whether this is a pop without a
@@ -822,7 +840,7 @@ void Assembler::add(const Operand& dst, const Immediate& x) {
(x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
pc_ = last_pc_;
last_pc_ = NULL;
- if (FLAG_print_push_pop_elimination) {
+ if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
}
return;
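
The renamed flags cover a peephole pass in the assembler: when the instruction just emitted makes the next one redundant (for instance a push immediately followed by a pop of the same register), the pair is rewritten or dropped. A stand-alone sketch of that idea on a toy instruction list (names invented, not the V8 assembler API):

// Illustrative peephole pass: drop adjacent "push R; pop R" pairs from a toy
// instruction stream. Types and names here are invented for the example.
#include <string>
#include <vector>

struct Instr {
  std::string op;  // "push" or "pop" (anything else passes through untouched)
  int reg;         // register number
};

std::vector<Instr> EliminatePushPop(const std::vector<Instr>& in) {
  std::vector<Instr> out;
  for (const Instr& instr : in) {
    if (instr.op == "pop" && !out.empty() &&
        out.back().op == "push" && out.back().reg == instr.reg) {
      out.pop_back();  // push R; pop R is a no-op: remove the pair
      continue;
    }
    out.push_back(instr);
  }
  return out;
}
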
@@ -2528,3 +2546,5 @@ void LogGeneratedCodeCoverage(const char* file_line) {
#endif
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 6a7effd421..9ece74432b 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -194,7 +194,6 @@ inline Hint NegateHint(Hint hint) {
class Immediate BASE_EMBEDDED {
public:
inline explicit Immediate(int x);
- inline explicit Immediate(const char* s);
inline explicit Immediate(const ExternalReference& ext);
inline explicit Immediate(Handle<Object> handle);
inline explicit Immediate(Smi* value);
@@ -551,6 +550,7 @@ class Assembler : public Malloced {
// Repetitive string instructions.
void rep_movs();
void rep_stos();
+ void stos();
// Exchange two registers
void xchg(Register dst, Register src);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 80e421bccd..608625817a 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
namespace v8 {
@@ -806,6 +808,7 @@ static void AllocateJSArray(MacroAssembler* masm,
Label* gc_required) {
ASSERT(scratch.is(edi)); // rep stos destination
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
+ ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
// Load the initial map from the array function.
__ mov(elements_array,
@@ -863,15 +866,22 @@ static void AllocateJSArray(MacroAssembler* masm,
if (fill_with_hole) {
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
-
- __ push(eax);
__ mov(eax, Factory::the_hole_value());
-
__ cld();
+ // Do not use rep stos when filling less than kRepStosThreshold
+ // words.
+ const int kRepStosThreshold = 16;
+ Label loop, entry, done;
+ __ cmp(ecx, kRepStosThreshold);
+ __ j(below, &loop); // Note: ecx > 0.
__ rep_stos();
-
- // Restore saved registers.
- __ pop(eax);
+ __ jmp(&done);
+ __ bind(&loop);
+ __ stos();
+ __ bind(&entry);
+ __ cmp(edi, Operand(elements_array_end));
+ __ j(below, &loop);
+ __ bind(&done);
}
}
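
The fill code above now avoids rep stos for short arrays, presumably because the string instruction's setup cost only pays off above kRepStosThreshold (16) words. The same size-based dispatch in plain C++ (threshold taken from the patch, the rest invented for illustration):

// Illustrative only: choose between a simple store loop and a bulk fill
// depending on the element count, mirroring the kRepStosThreshold idea.
#include <algorithm>
#include <cstddef>

void FillWords(int* dst, int value, std::size_t count) {
  const std::size_t kBulkFillThreshold = 16;  // same cut-over as the patch
  if (count < kBulkFillThreshold) {
    for (std::size_t i = 0; i < count; ++i) dst[i] = value;  // short loop
  } else {
    std::fill(dst, dst + count, value);  // bulk fill (rep stos analogue)
  }
}
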
@@ -970,13 +980,14 @@ static void ArrayNativeCode(MacroAssembler* masm,
AllocateJSArray(masm,
edi,
ecx,
- eax,
ebx,
+ eax,
edx,
edi,
true,
&prepare_generic_code_call);
__ IncrementCounter(&Counters::array_function_native, 1);
+ __ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
@@ -1067,7 +1078,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+ Label generic_array_code;
// Get the Array function.
GenerateLoadArrayFunction(masm, edi);
@@ -1247,3 +1258,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 63286a762a..226a374bc4 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
@@ -2979,6 +2981,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Record the position for debugging purposes.
@@ -4227,8 +4230,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the i'th entry of the array.
__ mov(edx, frame_->ElementAt(2));
- __ mov(ebx, Operand(edx, eax, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, FixedArrayElementOperand(edx, eax));
// Get the expected map from the stack or a zero map in the
// permanent slow case eax: current iteration count ebx: i'th entry
@@ -4724,43 +4726,14 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
JumpTarget slow;
JumpTarget done;
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
- // If there was no control flow to slow, we can exit early.
- if (!slow.is_linked()) return result;
- done.Jump(&result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
- // Only generate the fast case for locals that rewrite to slots.
- // This rules out argument loads because eval forces arguments
- // access to be through the arguments object.
- if (potential_slot != NULL) {
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
- __ mov(result.reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- result,
- &slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(result.reg(), Factory::the_hole_value());
- done.Branch(not_equal, &result);
- __ mov(result.reg(), Factory::undefined_value());
- }
- // There is always control flow to slow from
- // ContextSlotOperandCheckExtensions so we have to jump around
- // it.
- done.Jump(&result);
- }
- }
+ // Generate fast case for loading from slots that correspond to
+ // local/global variables or arguments unless they are shadowed by
+ // eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(slot,
+ typeof_state,
+ &result,
+ &slow,
+ &done);
slow.Bind();
// A runtime call is inevitable. We eagerly sync frame elements
@@ -4929,6 +4902,68 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
}
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ done->Jump(result);
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ *result = allocator()->Allocate();
+ ASSERT(result->is_valid());
+ __ mov(result->reg(),
+ ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ cmp(result->reg(), Factory::the_hole_value());
+ done->Branch(not_equal, result);
+ __ mov(result->reg(), Factory::undefined_value());
+ }
+ done->Jump(result);
+ } else if (rewrite != NULL) {
+ // Generate fast case for calls of an argument function.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ Result arguments = allocator()->Allocate();
+ ASSERT(arguments.is_valid());
+ __ mov(arguments.reg(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ arguments,
+ slow));
+ frame_->Push(&arguments);
+ frame_->Push(key_literal->handle());
+ *result = EmitKeyedLoad();
+ done->Jump(result);
+ }
+ }
+ }
+ }
+}
+
+
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
@@ -5698,6 +5733,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval.
@@ -5747,6 +5783,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Push the name of the function onto the frame.
@@ -5765,59 +5802,26 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// JavaScript examples:
//
- // with (obj) foo(1, 2, 3) // foo is in obj
+ // with (obj) foo(1, 2, 3) // foo may be in obj.
//
// function f() {};
// function g() {
// eval(...);
- // f(); // f could be in extension object
+ // f(); // f could be in extension object.
// }
// ----------------------------------
- JumpTarget slow;
- JumpTarget done;
-
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
+ JumpTarget slow, done;
Result function;
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
- function = LoadFromGlobalSlotCheckExtensions(var->slot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&function);
- LoadGlobalReceiver();
- done.Jump();
-
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = var->local_if_not_shadowed()->slot();
- // Only generate the fast case for locals that rewrite to slots.
- // This rules out argument loads because eval forces arguments
- // access to be through the arguments object.
- if (potential_slot != NULL) {
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- function = allocator()->Allocate();
- ASSERT(function.is_valid());
- __ mov(function.reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- function,
- &slow));
- JumpTarget push_function_and_receiver;
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(function.reg(), Factory::the_hole_value());
- push_function_and_receiver.Branch(not_equal, &function);
- __ mov(function.reg(), Factory::undefined_value());
- }
- push_function_and_receiver.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- done.Jump();
- }
- }
+
+ // Generate fast case for loading functions from slots that
+ // correspond to local/global variables or arguments unless they
+ // are shadowed by eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &function,
+ &slow,
+ &done);
slow.Bind();
// Enter the runtime system to load the function from the context.
@@ -5839,7 +5843,18 @@ void CodeGenerator::VisitCall(Call* node) {
ASSERT(!allocator()->is_used(edx));
frame_->EmitPush(edx);
- done.Bind();
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ JumpTarget call;
+ call.Jump();
+ done.Bind(&function);
+ frame_->Push(&function);
+ LoadGlobalReceiver();
+ call.Bind();
+ }
+
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@@ -5874,6 +5889,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Push the name of the function onto the frame.
@@ -6149,11 +6165,11 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(less);
+ destination()->false_target()->Branch(below);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
obj.Unuse();
map.Unuse();
- destination()->Split(less_equal);
+ destination()->Split(below_equal);
}
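
Several hunks in this file replace the signed condition codes (less, greater, greater_equal) with their unsigned counterparts (below, above, above_equal) for instance-type comparisons; instance types are unsigned bytes, so the unsigned conditions stay correct even for codes with the top bit set. The distinction in plain C++ (values invented for illustration):

// Illustrative: comparing byte-sized type codes as signed vs. unsigned.
// For a value with the high bit set, the two interpretations disagree.
#include <cstdint>
#include <cstdio>

int main() {
  std::uint8_t type = 0x90;          // high bit set: "large" unsigned code
  std::uint8_t first_object = 0x10;  // invented threshold for illustration

  bool unsigned_ge = type >= first_object;                    // true: 0x90 >= 0x10
  bool signed_ge = static_cast<std::int8_t>(type) >=
                   static_cast<std::int8_t>(first_object);    // false: -112 >= 16
  std::printf("unsigned: %d, signed: %d\n", unsigned_ge, signed_ge);
  return 0;
}
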
@@ -6266,7 +6282,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
__ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
__ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
- null.Branch(less);
+ null.Branch(below);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@@ -6634,16 +6650,6 @@ class DeferredSearchCache: public DeferredCode {
};
-// Return a position of the element at |index_as_smi| + |additional_offset|
-// in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi.
-static Operand ArrayElement(Register array,
- Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
-}
-
-
void DeferredSearchCache::Generate() {
Label first_loop, search_further, second_loop, cache_miss;
@@ -6660,11 +6666,11 @@ void DeferredSearchCache::Generate() {
__ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
__ j(less, &search_further);
- __ cmp(key_, ArrayElement(cache_, dst_));
+ __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
__ j(not_equal, &first_loop);
__ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, ArrayElement(cache_, dst_, 1));
+ __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&search_further);
@@ -6678,11 +6684,11 @@ void DeferredSearchCache::Generate() {
__ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
__ j(less_equal, &cache_miss);
- __ cmp(key_, ArrayElement(cache_, dst_));
+ __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
__ j(not_equal, &second_loop);
__ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, ArrayElement(cache_, dst_, 1));
+ __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&cache_miss);
@@ -6730,7 +6736,7 @@ void DeferredSearchCache::Generate() {
__ pop(ebx); // restore the key
__ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
// Store key.
- __ mov(ArrayElement(ecx, edx), ebx);
+ __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
__ RecordWrite(ecx, 0, ebx, edx);
// Store value.
@@ -6738,7 +6744,7 @@ void DeferredSearchCache::Generate() {
__ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
__ add(Operand(edx), Immediate(Smi::FromInt(1)));
__ mov(ebx, eax);
- __ mov(ArrayElement(ecx, edx), ebx);
+ __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
__ RecordWrite(ecx, 0, ebx, edx);
if (!dst_.is(eax)) {
@@ -6785,11 +6791,11 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
// tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp.reg(), FieldOperand(cache.reg(),
- JSFunctionResultCache::kFingerOffset));
- __ cmp(key.reg(), ArrayElement(cache.reg(), tmp.reg()));
+ JSFunctionResultCache::kFingerOffset));
+ __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
deferred->Branch(not_equal);
- __ mov(tmp.reg(), ArrayElement(cache.reg(), tmp.reg(), 1));
+ __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
deferred->BindExit();
frame_->Push(&tmp);
@@ -6866,7 +6872,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(less);
+ deferred->Branch(below);
__ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
__ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(not_zero);
@@ -6888,14 +6894,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
deferred->Branch(not_zero);
// Bring addresses into index1 and index2.
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_half_pointer_size, // index1 is Smi
- FixedArray::kHeaderSize));
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_half_pointer_size, // index2 is Smi
- FixedArray::kHeaderSize));
+ __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
+ __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
// Swap elements.
__ mov(object.reg(), Operand(index1.reg(), 0));
@@ -8192,11 +8192,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(less);
+ destination()->false_target()->Branch(below);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
answer.Unuse();
map.Unuse();
- destination()->Split(less_equal);
+ destination()->Split(below_equal);
} else {
// Uncommon case: typeof testing against a string literal that is
// never returned from the typeof operator.
@@ -8768,11 +8768,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
deferred->Branch(not_equal);
// Store the value.
- __ mov(Operand(tmp.reg(),
- key.reg(),
- times_2,
- FixedArray::kHeaderSize - kHeapObjectTag),
- result.reg());
+ __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
deferred->BindExit();
@@ -9074,7 +9070,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ mov(ecx, Operand(esp, 3 * kPointerSize));
__ mov(eax, Operand(esp, 2 * kPointerSize));
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
+ __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
__ cmp(ecx, Factory::undefined_value());
__ j(equal, &slow_case);
@@ -10296,6 +10292,11 @@ void IntegerConvert(MacroAssembler* masm,
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
+ if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+ return;
+ }
if (!type_info.IsInteger32() || !use_sse3) {
// Get exponent word.
__ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
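
The new fast path uses cvttsd2si to convert a heap number directly to an untagged int32 when type feedback guarantees the value fits. In C++ terms this is a truncating double-to-int conversion, roughly (illustrative; real cvttsd2si signals out-of-range inputs with 0x80000000):

// Illustrative: what the SSE2 fast path computes for values already known to
// fit in an int32.
#include <cstdint>

std::int32_t TruncateToInt32(double value) {
  return static_cast<std::int32_t>(value);  // truncation toward zero
}
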
@@ -11601,7 +11602,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &first_non_object);
+ __ j(below, &first_non_object);
// Return non-zero (eax is not zero)
Label return_not_equal;
@@ -11618,7 +11619,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(greater_equal, &return_not_equal);
+ __ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
__ cmp(ecx, ODDBALL_TYPE);
@@ -12266,9 +12267,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
__ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &slow, not_taken);
+ __ j(below, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(greater, &slow, not_taken);
+ __ j(above, &slow, not_taken);
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
@@ -12296,9 +12297,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &slow, not_taken);
+ __ j(below, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(greater, &slow, not_taken);
+ __ j(above, &slow, not_taken);
// Register mapping:
// eax is object map.
@@ -13296,3 +13297,5 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 5967338da2..e00bec7131 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -28,7 +28,9 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
+#include "ast.h"
#include "ic-inl.h"
+#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
@@ -343,6 +345,15 @@ class CodeGenerator: public AstVisitor {
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+  // Return the operand for the element at |index_as_smi| + |additional_offset|
+  // in the FixedArray whose pointer is held in |array|. |index_as_smi| is a Smi.
+ static Operand FixedArrayElementOperand(Register array,
+ Register index_as_smi,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
+ }
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -454,6 +465,16 @@ class CodeGenerator: public AstVisitor {
TypeofState typeof_state,
JumpTarget* slow);
+ // Support for loading from local/global variables and arguments
+ // whose location is known unless they are shadowed by
+ // eval-introduced bindings. Generates no code for unsupported slot
+ // types and therefore expects to fall through to the slow jump target.
+ void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done);
+
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
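
FixedArrayElementOperand, added above, folds the smi-tagged index straight into the ia32 addressing mode: a smi carries the integer shifted left by one bit, so scaling it by half a pointer size yields index * kPointerSize. A hedged arithmetic sketch of the byte offset being encoded (constants are stand-ins, not authoritative V8 values):

// Illustrative: the byte offset that FieldOperand(array, index_as_smi,
// times_half_pointer_size, offset) encodes on ia32, where a smi is the
// integer value shifted left by one bit.
#include <cassert>
#include <cstdint>

const int kPointerSize = 4;           // ia32
const int kHeapObjectTag = 1;         // tagged pointers are off by one
const int kFixedArrayHeaderSize = 8;  // invented stand-in for FixedArray::kHeaderSize

std::int32_t ElementOffset(std::int32_t index_as_smi, int additional_offset) {
  int offset = kFixedArrayHeaderSize + additional_offset * kPointerSize;
  // times_half_pointer_size scales by 2 on ia32; a smi already carries a
  // factor of 2, so index_as_smi * 2 == index * kPointerSize.
  return index_as_smi * (kPointerSize / 2) + offset - kHeapObjectTag;
}

int main() {
  std::int32_t smi_three = 3 << 1;  // smi encoding of the integer 3
  assert(ElementOffset(smi_three, 0) ==
         kFixedArrayHeaderSize + 3 * kPointerSize - kHeapObjectTag);
  return 0;
}
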
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 2107ad96f4..b15140f04c 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -33,6 +33,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "cpu.h"
#include "macro-assembler.h"
@@ -77,3 +79,5 @@ void CPU::DebugBreak() {
}
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index d142b11cf7..9780f3b09e 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "debug.h"
@@ -261,3 +263,5 @@ const int Debug::kFrameDropperFrameSize = 5;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 8d342e087c..58c22afcd3 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -30,6 +30,9 @@
#include <stdarg.h>
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "disasm.h"
namespace disasm {
@@ -90,6 +93,7 @@ static ByteMnemonic zero_operands_instr[] = {
{0x99, "cdq", UNSET_OP_ORDER},
{0x9B, "fwait", UNSET_OP_ORDER},
{0xFC, "cld", UNSET_OP_ORDER},
+ {0xAB, "stos", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
@@ -1438,3 +1442,5 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
} // namespace disasm
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index 61e2b5edfc..b749e594bc 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "data-flow.h"
@@ -948,3 +950,5 @@ void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 5c900bedd7..212cfdeaa0 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "frames-inl.h"
namespace v8 {
@@ -109,3 +111,5 @@ Address InternalFrame::GetCallerStackPointer() const {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index e9838ada77..368a8eeb0b 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -79,11 +81,17 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
bool function_in_register = true;
// Possibly allocate a local context.
- if (scope()->num_heap_slots() > 0) {
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
- __ CallRuntime(Runtime::kNewContext, 1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
function_in_register = false;
// Context is returned in both eax and esi. It replaces the context
// passed to us. It's saved in the stack and kept live in esi.
@@ -140,7 +148,18 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
}
{ Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ // Visit all the explicit declarations unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ scope()->VisitIllegalRedeclaration(this);
+ } else {
+ VisitDeclarations(scope()->declarations());
+ }
}
{ Comment cmnt(masm_, "[ Stack check");
@@ -425,6 +444,39 @@ void FullCodeGenerator::DropAndApply(int count,
}
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ // In an effect context, the true and the false case branch to the
+ // same label.
+ *if_true = *if_false = materialize_true;
+ break;
+ case Expression::kValue:
+ *if_true = materialize_true;
+ *if_false = materialize_false;
+ break;
+ case Expression::kTest:
+ *if_true = true_label_;
+ *if_false = false_label_;
+ break;
+ case Expression::kValueTest:
+ *if_true = materialize_true;
+ *if_false = false_label_;
+ break;
+ case Expression::kTestValue:
+ *if_true = true_label_;
+ *if_false = materialize_false;
+ break;
+ }
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
@@ -490,6 +542,61 @@ void FullCodeGenerator::Apply(Expression::Context context,
}
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: {
+ Handle<Object> value =
+ flag ? Factory::true_value() : Factory::false_value();
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), value);
+ break;
+ case kStack:
+ __ push(Immediate(value));
+ break;
+ }
+ break;
+ }
+ case Expression::kTest:
+ __ jmp(flag ? true_label_ : false_label_);
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ // If value is false it's needed.
+ if (!flag) __ mov(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ // If value is false it's needed.
+ if (!flag) __ push(Immediate(Factory::false_value()));
+ break;
+ }
+ __ jmp(flag ? true_label_ : false_label_);
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ // If value is true it's needed.
+ if (flag) __ mov(result_register(), Factory::true_value());
+ break;
+ case kStack:
+ // If value is true it's needed.
+ if (flag) __ push(Immediate(Factory::true_value()));
+ break;
+ }
+ __ jmp(flag ? true_label_ : false_label_);
+ break;
+ }
+}
+
+
void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
@@ -665,22 +772,22 @@ void FullCodeGenerator::Move(Slot* dst,
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+ Variable::Mode mode,
+ FunctionLiteral* function) {
Comment cmnt(masm_, "[ Declaration");
- Variable* var = decl->proxy()->var();
- ASSERT(var != NULL); // Must have been resolved.
- Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
+ ASSERT(variable != NULL); // Must have been resolved.
+ Slot* slot = variable->slot();
+ Property* prop = variable->AsProperty();
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
+ if (mode == Variable::CONST) {
__ mov(Operand(ebp, SlotOffset(slot)),
Immediate(Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kAccumulator);
+ } else if (function != NULL) {
+ VisitForValue(function, kAccumulator);
__ mov(Operand(ebp, SlotOffset(slot)), result_register());
}
break;
@@ -690,7 +797,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// this specific context.
// The variable in the decl always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ mov(ebx,
@@ -698,12 +805,12 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
}
- if (decl->mode() == Variable::CONST) {
- __ mov(eax, Immediate(Factory::the_hole_value()));
- __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ if (mode == Variable::CONST) {
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
+ Immediate(Factory::the_hole_value()));
// No write barrier since the hole value is in old space.
- } else if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kAccumulator);
+ } else if (function != NULL) {
+ VisitForValue(function, kAccumulator);
__ mov(CodeGenerator::ContextOperand(esi, slot->index()),
result_register());
int offset = Context::SlotOffset(slot->index());
@@ -714,21 +821,19 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
case Slot::LOOKUP: {
__ push(esi);
- __ push(Immediate(var->name()));
+ __ push(Immediate(variable->name()));
// Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+ PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
+ if (mode == Variable::CONST) {
__ push(Immediate(Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kStack);
+ } else if (function != NULL) {
+ VisitForValue(function, kStack);
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
}
@@ -738,13 +843,13 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
}
} else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForValue(prop->obj(), kStack);
- if (decl->fun() != NULL) {
+ if (function != NULL) {
VisitForValue(prop->key(), kStack);
- VisitForValue(decl->fun(), kAccumulator);
+ VisitForValue(function, kAccumulator);
__ pop(ecx);
} else {
VisitForValue(prop->key(), kAccumulator);
@@ -763,6 +868,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
}
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
@@ -773,19 +883,225 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+ // Keep the switch value on the stack until a case matches.
+ VisitForValue(stmt->tag(), kStack);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForValue(clause->label(), kAccumulator);
+
+ // Perform the comparison as if via '==='. The comparison stub expects
+ // the smi vs. smi case to be handled before it is called.
+ Label slow_case;
+ __ mov(edx, Operand(esp, 0)); // Switch value.
+ __ mov(ecx, edx);
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+
+ __ bind(&slow_case);
+ CompareStub stub(equal, true);
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ }
- // Build the shared function info and instantiate the function based
- // on it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(expr, script(), this);
- if (HasStackOverflow()) return;
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ jmp(nested_statement.break_target());
+ } else {
+ __ jmp(default_clause->body_target()->entry_label());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target()->entry_label());
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. Both SpiderMonkey and JSC
+ // ignore null and undefined in contrast to the specification; see
+ // ECMA-262 section 12.6.4.
+ VisitForValue(stmt->enumerable(), kAccumulator);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(equal, &exit);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &exit);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &convert);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(eax);
- // Create a new closure.
- __ push(esi);
- __ push(Immediate(function_info));
- __ CallRuntime(Runtime::kNewClosure, 2);
+ // TODO(kasperl): Check cache validity in generated code. This is a
+ // fast case for the JSObject::IsSimpleEnum cache validity
+ // checks. If we cannot guarantee cache validity, call the runtime
+ // system to check cache validity or get the property names in a
+ // fixed array.
+
+ // Get the set of properties to enumerate.
+ __ push(eax); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
+ __ j(not_equal, &fixed_array);
+
+ // We got a map in register eax. Get the enumeration cache from it.
+ __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
+ __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ push(eax); // Map.
+ __ push(edx); // Enumeration cache.
+ __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
+ __ SmiTag(eax);
+ __ push(eax); // Enumeration cache length (as smi).
+ __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ jmp(&loop);
+
+ // We got a fixed array in register eax. Iterate through that.
+ __ bind(&fixed_array);
+ __ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
+ __ push(eax);
+ __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ SmiTag(eax);
+ __ push(eax); // Fixed array length (as smi).
+ __ push(Immediate(Smi::FromInt(0))); // Initial index.
+
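+ // At this point the stack holds, from top to bottom: the current index
+ // (smi), the cache or array length (smi), the enumeration cache or fixed
+ // array, the expected map (or smi 0 for the slow path), and the
+ // enumerable object itself.
+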
+ // Generate code for doing the condition check.
+ __ bind(&loop);
+ __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
+ __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
+ __ j(above_equal, loop_statement.break_target());
+
+ // Get the current entry of the array into register ebx.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case into register edx.
+ __ mov(edx, Operand(esp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ Label update_each;
+ __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ j(equal, &update_each);
+
+ // Convert the entry to a string or null if it isn't a property
+ // anymore. If the property has been removed while iterating, we
+ // just skip it.
+ __ push(ecx); // Enumerable.
+ __ push(ebx); // Current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, loop_statement.continue_target());
+ __ mov(ebx, Operand(eax));
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register ebx.
+ __ bind(&update_each);
+ __ mov(result_register(), ebx);
+ // Perform the assignment as if via '='.
+ EmitAssignment(stmt->each());
+
+ // Generate code for the body of the loop.
+ Label stack_limit_hit, stack_check_done;
+ Visit(stmt->body());
+
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_done);
+
+ // Generate code for going to the next element by incrementing the
+ // index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_target());
+ __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+
+ // Slow case for the stack limit check.
+ StackCheckStub stack_check_stub;
+ __ bind(&stack_limit_hit);
+ __ CallStub(&stack_check_stub);
+ __ jmp(&stack_check_done);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_target());
+ __ add(Operand(esp), Immediate(5 * kPointerSize));
+
+ // Exit and decrement the loop depth.
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && info->num_literals() == 0) {
+ FastNewClosureStub stub;
+ __ push(Immediate(info));
+ __ CallStub(&stub);
+ } else {
+ __ push(esi);
+ __ push(Immediate(info));
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ }
Apply(context_, eax);
}
@@ -830,7 +1146,20 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
- Apply(context, slot);
+ if (var->mode() == Variable::CONST) {
+ // Constants may be the hole value if they have not been initialized.
+ // Unhole them.
+ Label done;
+ MemOperand slot_operand = EmitSlotSearch(slot, eax);
+ __ mov(eax, slot_operand);
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &done);
+ __ mov(eax, Factory::undefined_value());
+ __ bind(&done);
+ Apply(context, eax);
+ } else {
+ Apply(context, slot);
+ }
} else {
Comment cmnt(masm_, "Rewritten parameter");
@@ -966,22 +1295,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_elements()));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(length);
+ __ CallStub(&stub);
}
bool result_saved = false; // Is the result saved to the stack?
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
+ for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
@@ -1016,7 +1351,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() != Token::INIT_CONST);
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -1095,6 +1436,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op(),
context_);
break;
case NAMED_PROPERTY:
@@ -1137,15 +1479,66 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
}
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ // Invalid left-hand sides are rewritten to have a 'throw
+ // ReferenceError' on the left-hand side.
+ if (!expr->IsValidLeftHandSide()) {
+ VisitForEffect(expr);
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+ break;
+ }
+ case NAMED_PROPERTY: {
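+ // The store IC is called with the value in eax, the receiver in edx and
+ // the property name in ecx.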
+ __ push(eax); // Preserve value.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ mov(edx, eax);
+ __ pop(eax); // Restore value.
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
+ break;
+ }
+ case KEYED_PROPERTY: {
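+ // The keyed store IC is called with the value in eax, the key in ecx and
+ // the receiver in edx.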
+ __ push(eax); // Preserve value.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ mov(ecx, eax);
+ __ pop(edx);
+ __ pop(eax); // Restore value.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
+ break;
+ }
+ }
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op,
Expression::Context context) {
- // Three main cases: global variables, lookup slots, and all other
- // types of slots. Left-hand-side parameters that rewrite to
- // explicit property accesses do not reach here.
+ // Left-hand sides that rewrite to explicit property accesses do not reach
+ // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
- Slot* slot = var->slot();
if (var->is_global()) {
ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
@@ -1156,44 +1549,61 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
- Apply(context, eax);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- __ push(result_register()); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- Apply(context, eax);
-
- } else if (slot != NULL) {
+ } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+ // Perform the assignment for non-const variables and for initialization
+ // of const variables. Const assignments are simply skipped.
+ Label done;
+ Slot* slot = var->slot();
switch (slot->type()) {
- case Slot::LOCAL:
case Slot::PARAMETER:
- __ mov(Operand(ebp, SlotOffset(slot)), result_register());
+ case Slot::LOCAL:
+ if (op == Token::INIT_CONST) {
+ // Detect const reinitialization by checking for the hole value.
+ __ mov(edx, Operand(ebp, SlotOffset(slot)));
+ __ cmp(edx, Factory::the_hole_value());
+ __ j(not_equal, &done);
+ }
+ // Perform the assignment.
+ __ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, ecx);
- __ mov(target, result_register());
-
- // RecordWrite may destroy all its register arguments.
- __ mov(edx, result_register());
+ if (op == Token::INIT_CONST) {
+ // Detect const reinitialization by checking for the hole value.
+ __ mov(edx, target);
+ __ cmp(edx, Factory::the_hole_value());
+ __ j(not_equal, &done);
+ }
+ // Perform the assignment and issue the write barrier.
+ __ mov(target, eax);
+ // The value of the assignment is in eax. RecordWrite clobbers its
+ // register arguments.
+ __ mov(edx, eax);
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ RecordWrite(ecx, offset, edx, ebx);
break;
}
case Slot::LOOKUP:
- UNREACHABLE();
+ // Call the runtime for the assignment. The runtime will ignore
+ // const reinitialization.
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ if (op == Token::INIT_CONST) {
+ // The runtime will ignore const redeclaration.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
break;
}
- Apply(context, result_register());
-
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
+ __ bind(&done);
}
+
+ Apply(context, eax);
}
@@ -1327,7 +1737,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -1341,16 +1752,62 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Variable* var = fun->AsVariableProxy()->AsVariable();
if (var != NULL && var->is_possibly_eval()) {
- // Call to the identifier 'eval'.
- UNREACHABLE();
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ VisitForValue(fun, kStack);
+ __ push(Immediate(Factory::undefined_value())); // Reserved receiver slot.
+
+ // Push the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ // Push copy of the function - found below the arguments.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ push(Operand(esp, arg_count * kPointerSize));
+ } else {
+ __ push(Immediate(Factory::undefined_value()));
+ }
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+ // The runtime call returns a pair of values in eax (function) and
+ // edx (receiver). Touch up the stack with the right values.
+ __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ DropAndApply(1, context_, eax);
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Push global object as receiver for the call IC.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot.
- UNREACHABLE();
+ // Call to a lookup slot (dynamically introduced variable). Call the
+ // runtime to find the function to call (returned in eax) and the object
+ // holding it (returned in edx).
+ __ push(context_register());
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ push(eax); // Function.
+ __ push(edx); // Receiver.
+ EmitCallWithStub(expr);
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
@@ -1447,7 +1904,730 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+ Handle<String> name = expr->name();
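+ // Dispatch on the intrinsic name (inline runtime calls are prefixed with
+ // '_') to the corresponding Emit* helper below.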
+ if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+ EmitIsSmi(expr->arguments());
+ } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+ EmitIsNonNegativeSmi(expr->arguments());
+ } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+ EmitIsObject(expr->arguments());
+ } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+ EmitIsUndetectableObject(expr->arguments());
+ } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+ EmitIsFunction(expr->arguments());
+ } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+ EmitIsArray(expr->arguments());
+ } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+ EmitIsRegExp(expr->arguments());
+ } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+ EmitIsConstructCall(expr->arguments());
+ } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+ EmitObjectEquals(expr->arguments());
+ } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+ EmitArguments(expr->arguments());
+ } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+ EmitArgumentsLength(expr->arguments());
+ } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+ EmitClassOf(expr->arguments());
+ } else if (strcmp("_Log", *name->ToCString()) == 0) {
+ EmitLog(expr->arguments());
+ } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+ EmitRandomHeapNumber(expr->arguments());
+ } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+ EmitSubString(expr->arguments());
+ } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+ EmitRegExpExec(expr->arguments());
+ } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+ EmitValueOf(expr->arguments());
+ } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+ EmitSetValueOf(expr->arguments());
+ } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+ EmitNumberToString(expr->arguments());
+ } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+ EmitCharFromCode(expr->arguments());
+ } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+ EmitFastCharCodeAt(expr->arguments());
+ } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+ EmitStringAdd(expr->arguments());
+ } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+ EmitStringCompare(expr->arguments());
+ } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+ EmitMathPow(expr->arguments());
+ } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+ EmitMathSin(expr->arguments());
+ } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+ EmitMathCos(expr->arguments());
+ } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+ EmitMathSqrt(expr->arguments());
+ } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+ EmitCallFunction(expr->arguments());
+ } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+ EmitRegExpConstructResult(expr->arguments());
+ } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+ EmitSwapElements(expr->arguments());
+ } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+ EmitGetFromCache(expr->arguments());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
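+ // A non-negative smi has both the smi tag bit and the sign bit clear, so
+ // a single test against the combined mask suffices.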
+ __ test(eax, Immediate(kSmiTagMask | 0x80000000));
+ __ j(zero, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, if_true);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
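+ // The value is a JS object if its instance type lies in the range
+ // [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE].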
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(below, if_false);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ j(below_equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(equal, if_false);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(equal, if_false);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Get the frame pointer for the calling frame.
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &check_frame_marker);
+ __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ pop(ebx);
+ __ cmp(eax, Operand(ebx));
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in edx and the formal
+ // parameter count in eax.
+ VisitForValue(args->at(0), kAccumulator);
+ __ mov(edx, eax);
+ __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label exit;
+ // Get the number of formal parameters.
+ __ Set(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ // If the object is a smi, we return null.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(ebx, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &null);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ cmp(ebx, JS_FUNCTION_TYPE);
+ __ j(equal, &function);
+
+ // Check if the constructor in the map is a function.
+ __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &non_function_constructor);
+
+ // eax now contains the constructor function. Grab the
+ // instance class name from there.
+ __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ jmp(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ mov(eax, Factory::function_class_symbol());
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ mov(eax, Factory::Object_symbol());
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, Factory::null_value());
+
+ // All done.
+ __ bind(&done);
+
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ mov(eax, Factory::undefined_value());
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label slow_allocate_heapnumber;
+ Label heapnumber_allocated;
+
+ __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ // To allocate a heap number, and ensure that it is not a smi, we
+ // call the runtime function FUnaryMinus on 0, returning the double
+ // -0.0. A new, distinct heap number is returned each time.
+ __ push(Immediate(Smi::FromInt(0)));
+ __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ __ mov(edi, eax);
+
+ __ bind(&heapnumber_allocated);
+
+ __ PrepareCallCFunction(0, ebx);
+ __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+ // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // by computing:
+ // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+ // This is implemented with SSE2 when available and with the FPU otherwise.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm1, Operand(ebx));
+ __ movd(xmm0, Operand(eax));
+ __ cvtss2sd(xmm1, xmm1);
+ __ pxor(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+ __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // 0x4130000000000000 is 1.0 x 2^20 as a double.
+ __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
+ Immediate(0x41300000));
+ __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
+ __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
+ __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
+ __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
+ __ fsubp(1);
+ __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, edi);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ASSERT(args->length() == 3);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ASSERT(args->length() == 4);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ VisitForValue(args->at(3), kStack);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
+ __ j(not_equal, &done);
+ __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
+
+ __ bind(&done);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the runtime function.
+ ASSERT(args->length() == 2);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack); // Load the object.
+ VisitForValue(args->at(1), kAccumulator); // Load the value.
+ __ pop(ebx); // eax = value. ebx = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ // If the object is not a value type, return the value.
+ __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
+ __ j(not_equal, &done);
+
+ // Store the value.
+ __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
+
+ __ bind(&done);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and call the stub.
+ VisitForValue(args->at(0), kStack);
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label slow_case, done;
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
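+ // A single test checks both that the input is a smi and that the
+ // character code does not exceed String::kMaxAsciiCharCode.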
+ __ test(eax,
+ Immediate(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ j(not_zero, &slow_case);
+ __ Set(ebx, Immediate(Factory::single_character_string_cache()));
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize == 1);
+ ASSERT(kSmiShiftSize == 0);
+ // At this point eax contains the smi-tagged ASCII char code.
+ __ mov(ebx, FieldOperand(ebx,
+ eax, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(equal, &slow_case);
+ __ mov(eax, ebx);
+ __ jmp(&done);
+
+ __ bind(&slow_case);
+ __ push(eax);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+
+ __ bind(&done);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+ // TODO(fsc): Port the complete implementation from the classic back-end.
+ // Move the undefined value into the result register, which will
+ // trigger the slow case.
+ __ Set(eax, Immediate(Factory::undefined_value()));
+ Apply(context_, eax);
+}
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the runtime function.
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() >= 2);
+
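+ // The receiver is passed as the first argument, followed by the actual
+ // arguments; the function to call is the last argument.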
+ int arg_count = args->length() - 2; // For receiver and function.
+ VisitForValue(args->at(0), kStack); // Receiver.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i + 1), kStack);
+ }
+ VisitForValue(args->at(arg_count + 1), kAccumulator); // Function.
+
+ // InvokeFunction requires function in edi. Move it in there.
+ if (!result_register().is(edi)) __ mov(edi, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(edi, count, CALL_FUNCTION);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ Top::global_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort("Attempt to use undefined cache.");
+ __ mov(eax, Factory::undefined_value());
+ Apply(context_, eax);
+ return;
+ }
+
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register key = eax;
+ Register cache = ebx;
+ Register tmp = ecx;
+ __ mov(cache, CodeGenerator::ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(cache,
+ FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ mov(cache,
+ CodeGenerator::ContextOperand(
+ cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ mov(cache,
+ FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done, not_found;
+ // The cache finger (a smi offset) points at the most recently used key;
+ // check that entry first.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
+ __ j(not_equal, &not_found);
+
+ __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
+ __ jmp(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ push(cache);
+ __ push(key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ bind(&done);
+ Apply(context_, eax);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+ if (name->length() > 0 && name->Get(0) == '_') {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1481,6 +2661,46 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* prop = expr->expression()->AsProperty();
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (prop == NULL && var == NULL) {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ Apply(context_, true);
+ } else if (var != NULL &&
+ !var->is_global() &&
+ var->slot() != NULL &&
+ var->slot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ Apply(context_, false);
+ } else {
+ // Property or variable reference. Call the delete builtin with
+ // object and property name as arguments.
+ if (prop != NULL) {
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ } else if (var->is_global()) {
+ __ push(CodeGenerator::GlobalObject());
+ __ push(Immediate(var->name()));
+ } else {
+ // Non-global variable. Call the runtime to look up the context
+ // where the variable was introduced.
+ __ push(context_register());
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kLookupContext, 2);
+ __ push(eax);
+ __ push(Immediate(var->name()));
+ }
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ Apply(context_, eax);
+ }
+ break;
+ }
+
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
@@ -1521,33 +2741,15 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false, done;
- // Initially assume a pure test context. Notice that the labels are
- // swapped.
- Label* if_true = false_label_;
- Label* if_false = true_label_;
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &materialize_false;
- if_false = &materialize_true;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_false = &materialize_true;
- break;
- case Expression::kTestValue:
- if_true = &materialize_false;
- break;
- }
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+
+ // Notice that the labels are swapped.
+ PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
VisitForControl(expr->expression(), if_true, if_false);
+
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
@@ -1643,6 +2845,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // as the left-hand side.
+ if (!expr->expression()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->expression());
+ return;
+ }
// Expression can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
@@ -1664,7 +2872,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
- } else {
+ } else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && context_ != Expression::kEffect) {
__ push(Immediate(Smi::FromInt(0)));
@@ -1754,7 +2962,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
+ // Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN,
Expression::kEffect);
// For all contexts except kEffect: We have the result on
// top of the stack.
@@ -1762,7 +2972,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
ApplyTOS(context_);
}
} else {
+ // Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN,
context_);
}
break;
@@ -1840,36 +3052,41 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
+void FullCodeGenerator::EmitNullCompare(bool strict,
+ Register obj,
+ Register null_const,
+ Label* if_true,
+ Label* if_false,
+ Register scratch) {
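+ // Compare obj against the null constant. For strict equality only null
+ // matches; for non-strict equality, undefined and undetectable objects
+ // also compare equal to null.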
+ __ cmp(obj, Operand(null_const));
+ if (strict) {
+ __ j(equal, if_true);
+ } else {
+ __ j(equal, if_true);
+ __ cmp(obj, Factory::undefined_value());
+ __ j(equal, if_true);
+ __ test(obj, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // It can be an undetectable object.
+ __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+ __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_true);
+ }
+ __ jmp(if_false);
+}
+
+
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false, done;
- // Initially assume we are in a test context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &materialize_true;
- if_false = &materialize_false;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_true = &materialize_true;
- break;
- case Expression::kTestValue:
- if_false = &materialize_false;
- break;
- }
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
VisitForValue(expr->left(), kStack);
switch (expr->op()) {
@@ -1899,10 +3116,24 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
strict = true;
// Fall through
- case Token::EQ:
+ case Token::EQ: {
cc = equal;
__ pop(edx);
+ // If either operand is constant null we do a fast compare
+ // against null.
+ Literal* right_literal = expr->right()->AsLiteral();
+ Literal* left_literal = expr->left()->AsLiteral();
+ if (right_literal != NULL && right_literal->handle()->IsNull()) {
+ EmitNullCompare(strict, edx, eax, if_true, if_false, ecx);
+ Apply(context_, if_true, if_false);
+ return;
+ } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+ EmitNullCompare(strict, eax, edx, if_true, if_false, ecx);
+ Apply(context_, if_true, if_false);
+ return;
+ }
break;
+ }
case Token::LT:
cc = less;
__ pop(edx);
@@ -2012,3 +3243,5 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index bc7a33c6cc..644d20072e 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "ic-inl.h"
#include "runtime.h"
@@ -868,7 +870,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ecx: key (a smi)
// edx: receiver
// edi: FixedArray receiver->elements
- __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+ __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, Operand(eax));
__ RecordWrite(edi, 0, edx, ecx);
@@ -1643,3 +1645,5 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/jump-target-ia32.cc b/deps/v8/src/ia32/jump-target-ia32.cc
index cba6508031..76c0d02d4f 100644
--- a/deps/v8/src/ia32/jump-target-ia32.cc
+++ b/deps/v8/src/ia32/jump-target-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
@@ -431,3 +433,5 @@ void BreakTarget::Bind(Result* arg) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a7d2834520..ba2fe2dd4e 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
@@ -1706,3 +1708,5 @@ CodePatcher::~CodePatcher() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index fdf3b9febb..b0de82752b 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "unicode.h"
#include "log.h"
#include "ast.h"
@@ -51,7 +54,7 @@ namespace internal {
* - esp : points to tip of C stack.
* - ecx : points to tip of backtrack stack
*
- * The registers eax, ebx and ecx are free to use for computations.
+ * The registers eax and ebx are free to use for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
@@ -72,8 +75,6 @@ namespace internal {
* - backup of caller ebx
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
- * - Boolean at start (if 1, we are starting at the start of the string,
- * otherwise 0)
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -178,8 +179,8 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kAtStart), Immediate(0));
- BranchOrBacktrack(equal, &not_at_start);
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(eax, Operand(esi, edi, times_1, 0));
__ cmp(eax, Operand(ebp, kInputStart));
@@ -190,8 +191,8 @@ void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kAtStart), Immediate(0));
- BranchOrBacktrack(equal, on_not_at_start);
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(eax, Operand(esi, edi, times_1, 0));
__ cmp(eax, Operand(ebp, kInputStart));
@@ -209,6 +210,15 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
+#ifdef DEBUG
+ // If input is ASCII, don't even bother calling here if the string to
+  // match contains a non-ASCII character.
+ if (mode_ == ASCII) {
+ for (int i = 0; i < str.length(); i++) {
+ ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
+ }
+ }
+#endif
int byte_length = str.length() * char_size();
int byte_offset = cp_offset * char_size();
if (check_end_of_string) {
@@ -222,14 +232,56 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
on_failure = &backtrack_label_;
}
- for (int i = 0; i < str.length(); i++) {
+ // Do one character test first to minimize loading for the case that
+  // we don't match at all (loading more than one character introduces the
+ // chance of reading unaligned and reading across cache boundaries).
+ // If the first character matches, expect a larger chance of matching the
+ // string, and start loading more characters at a time.
+ if (mode_ == ASCII) {
+ __ cmpb(Operand(esi, edi, times_1, byte_offset),
+ static_cast<int8_t>(str[0]));
+ } else {
+    // Don't use a 16-bit immediate. The size-changing prefix throws off
+ // pre-decoding.
+ __ movzx_w(eax,
+ Operand(esi, edi, times_1, byte_offset));
+ __ cmp(eax, static_cast<int32_t>(str[0]));
+ }
+ BranchOrBacktrack(not_equal, on_failure);
+
+ __ lea(ebx, Operand(esi, edi, times_1, 0));
+ for (int i = 1, n = str.length(); i < n;) {
if (mode_ == ASCII) {
- __ cmpb(Operand(esi, edi, times_1, byte_offset + i),
- static_cast<int8_t>(str[i]));
+ if (i <= n - 4) {
+ int combined_chars =
+ (static_cast<uint32_t>(str[i + 0]) << 0) |
+ (static_cast<uint32_t>(str[i + 1]) << 8) |
+ (static_cast<uint32_t>(str[i + 2]) << 16) |
+ (static_cast<uint32_t>(str[i + 3]) << 24);
+ __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
+ i += 4;
+ } else {
+ __ cmpb(Operand(ebx, byte_offset + i),
+ static_cast<int8_t>(str[i]));
+ i += 1;
+ }
} else {
ASSERT(mode_ == UC16);
- __ cmpw(Operand(esi, edi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(str[i]));
+ if (i <= n - 2) {
+ __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
+ Immediate(*reinterpret_cast<const int*>(&str[i])));
+ i += 2;
+ } else {
+ // Avoid a 16-bit immediate operation. It uses the length-changing
+ // 0x66 prefix which causes pre-decoder misprediction and pipeline
+ // stalls. See
+ // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
+ // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
+ __ movzx_w(eax,
+ Operand(ebx, byte_offset + i * sizeof(uc16)));
+ __ cmp(eax, static_cast<int32_t>(str[i]));
+ i += 1;
+ }
}
BranchOrBacktrack(not_equal, on_failure);
}
@@ -625,7 +677,6 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
- __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -677,14 +728,6 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
- // Register ebx still holds -stringIndex.
- __ test(ebx, Operand(ebx));
- __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
- __ mov(Operand(ebp, kAtStart), ecx);
-
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
@@ -712,8 +755,8 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
// Load previous char as initial value of current-character.
Label at_start;
- __ cmp(Operand(ebp, kAtStart), Immediate(0));
- __ j(not_equal, &at_start);
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(equal, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
@@ -1201,3 +1244,5 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
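Note on the CheckCharacters change above: the rewritten loop tests a single character first and then, on the ASCII path, folds four pattern characters at a time into one 32-bit immediate, so a single cmp replaces four cmpb instructions. A minimal standalone sketch of that packing, in plain C++ rather than the V8 assembler API and assuming a little-endian target (as IA-32 is):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Pack four ASCII pattern characters into one 32-bit word, low byte first,
    // mirroring the combined_chars computation in the hunk above.
    static uint32_t PackAscii4(const char* s) {
      return (static_cast<uint32_t>(static_cast<uint8_t>(s[0])) << 0) |
             (static_cast<uint32_t>(static_cast<uint8_t>(s[1])) << 8) |
             (static_cast<uint32_t>(static_cast<uint8_t>(s[2])) << 16) |
             (static_cast<uint32_t>(static_cast<uint8_t>(s[3])) << 24);
    }

    int main() {
      const char subject[] = "foobarbaz";
      uint32_t window;
      // Unaligned 32-bit load of the subject, like cmp [ebx + byte_offset + i], imm32.
      std::memcpy(&window, subject + 3, sizeof(window));
      assert(window == PackAscii4("barb"));  // one 32-bit compare instead of four cmpb's
      return 0;
    }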
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 823bc03312..8b8eeed6ce 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -132,9 +132,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
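With the kAtStart slot removed above, the generated code no longer caches an "at start" boolean in the frame; CheckAtStart and CheckNotAtStart re-derive it from the start index stored at kStartIndex and the current input position. A hedged C++ restatement of the predicate those hunks compute (names are illustrative, not V8's):

    #include <cassert>

    // True iff the match began at string index 0 and the current position is
    // still at the first character of the input.
    static bool AtStart(int start_index, const char* current_pos,
                        const char* input_start) {
      if (start_index != 0) return false;    // replaces the old kAtStart test
      return current_pos == input_start;
    }

    int main() {
      const char input[] = "abc";
      assert(AtStart(0, input, input));       // at the very start
      assert(!AtStart(1, input, input));      // the match began later in the string
      assert(!AtStart(0, input + 2, input));  // already advanced past the start
      return 0;
    }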
diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc
index 73fefb3bbf..d840c0cc5c 100644
--- a/deps/v8/src/ia32/register-allocator-ia32.cc
+++ b/deps/v8/src/ia32/register-allocator-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"
@@ -151,3 +153,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 189c0e4d16..d7b05cf40b 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
@@ -2387,3 +2389,5 @@ Object* ConstructStubCompiler::CompileConstructStub(
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 10aaa52b83..e22df6ec28 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@@ -1310,3 +1312,5 @@ void VirtualFrame::Push(Expression* expr) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index 14fe4662dc..a8f23b0cc8 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -28,9 +28,10 @@
#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
#define V8_IA32_VIRTUAL_FRAME_IA32_H_
-#include "type-info.h"
+#include "codegen.h"
#include "register-allocator.h"
#include "scopes.h"
+#include "type-info.h"
namespace v8 {
namespace internal {
@@ -97,23 +98,16 @@ class VirtualFrame: public ZoneObject {
return register_locations_[num];
}
- int register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
- }
+ inline int register_location(Register reg);
- void set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
- }
+ inline void set_register_location(Register reg, int index);
bool is_used(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num] != kIllegalIndex;
}
- bool is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
- }
+ inline bool is_used(Register reg);
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
@@ -150,6 +144,9 @@ class VirtualFrame: public ZoneObject {
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Spill the top element of the frame.
+ void SpillTop() { SpillElementAt(element_count() - 1); }
+
// Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
@@ -217,10 +214,7 @@ class VirtualFrame: public ZoneObject {
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
- void SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
- }
+ inline void SetElementAt(int index, Handle<Object> value);
void PushElementAt(int index) {
PushFrameSlotAt(element_count() - index - 1);
@@ -315,10 +309,7 @@ class VirtualFrame: public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
- }
+ inline Result CallStub(CodeStub* stub, int arg_count);
// Call stub that takes a single argument passed in eax. The
// argument is given as a result which does not have to be eax or
@@ -361,7 +352,7 @@ class VirtualFrame: public ZoneObject {
Result CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. Key and receiver are not dropped.
+ // of the frame. All three are dropped.
Result CallKeyedStoreIC();
// Call call IC. Function name, arguments, and receiver are found on top
@@ -473,12 +464,9 @@ class VirtualFrame: public ZoneObject {
int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
- int parameter_count() {
- return cgen()->scope()->num_parameters();
- }
- int local_count() {
- return cgen()->scope()->num_stack_slots();
- }
+ inline int parameter_count();
+
+ inline int local_count();
// The index of the element that is at the processor's frame pointer
// (the ebp register). The parameters, receiver, and return address
diff --git a/deps/v8/src/jump-target-heavy.cc b/deps/v8/src/jump-target-heavy.cc
index 85620a2d96..468cf4a542 100644
--- a/deps/v8/src/jump-target-heavy.cc
+++ b/deps/v8/src/jump-target-heavy.cc
@@ -35,6 +35,9 @@ namespace v8 {
namespace internal {
+bool JumpTarget::compiling_deferred_code_ = false;
+
+
void JumpTarget::Jump(Result* arg) {
ASSERT(cgen()->has_valid_frame());
@@ -360,4 +363,64 @@ DeferredCode::DeferredCode()
}
}
+
+void JumpTarget::Unuse() {
+ reaching_frames_.Clear();
+ merge_labels_.Clear();
+ entry_frame_ = NULL;
+ entry_label_.Unuse();
+}
+
+
+void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
+ ASSERT(reaching_frames_.length() == merge_labels_.length());
+ ASSERT(entry_frame_ == NULL);
+ Label fresh;
+ merge_labels_.Add(fresh);
+ reaching_frames_.Add(frame);
+}
+
+
+// -------------------------------------------------------------------------
+// BreakTarget implementation.
+
+void BreakTarget::set_direction(Directionality direction) {
+ JumpTarget::set_direction(direction);
+ ASSERT(cgen()->has_valid_frame());
+ expected_height_ = cgen()->frame()->height();
+}
+
+
+void BreakTarget::CopyTo(BreakTarget* destination) {
+ ASSERT(destination != NULL);
+ destination->direction_ = direction_;
+ destination->reaching_frames_.Rewind(0);
+ destination->reaching_frames_.AddAll(reaching_frames_);
+ destination->merge_labels_.Rewind(0);
+ destination->merge_labels_.AddAll(merge_labels_);
+ destination->entry_frame_ = entry_frame_;
+ destination->entry_label_ = entry_label_;
+ destination->expected_height_ = expected_height_;
+}
+
+
+void BreakTarget::Branch(Condition cc, Hint hint) {
+ ASSERT(cgen()->has_valid_frame());
+
+ int count = cgen()->frame()->height() - expected_height_;
+ if (count > 0) {
+ // We negate and branch here rather than using DoBranch's negate
+ // and branch. This gives us a hook to remove statement state
+ // from the frame.
+ JumpTarget fall_through;
+ // Branch to fall through will not negate, because it is a
+ // forward-only target.
+ fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+ Jump(); // May emit merge code here.
+ fall_through.Bind();
+ } else {
+ DoBranch(cc, hint);
+ }
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/jump-target-heavy.h b/deps/v8/src/jump-target-heavy.h
new file mode 100644
index 0000000000..b923fe57f3
--- /dev/null
+++ b/deps/v8/src/jump-target-heavy.h
@@ -0,0 +1,242 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_HEAVY_H_
+#define V8_JUMP_TARGET_HEAVY_H_
+
+#include "macro-assembler.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class FrameElement;
+class Result;
+class VirtualFrame;
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code. It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths. When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame. For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
+
+class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
+ public:
+ // Forward-only jump targets can only be reached by forward CFG edges.
+ enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+ // Construct a jump target used to generate code and to provide
+ // access to a current frame.
+ explicit JumpTarget(Directionality direction)
+ : direction_(direction),
+ reaching_frames_(0),
+ merge_labels_(0),
+ entry_frame_(NULL) {
+ }
+
+ // Construct a jump target.
+ JumpTarget()
+ : direction_(FORWARD_ONLY),
+ reaching_frames_(0),
+ merge_labels_(0),
+ entry_frame_(NULL) {
+ }
+
+ virtual ~JumpTarget() {}
+
+ // Set the direction of the jump target.
+ virtual void set_direction(Directionality direction) {
+ direction_ = direction;
+ }
+
+ // Treat the jump target as a fresh one. The state is reset.
+ void Unuse();
+
+ inline CodeGenerator* cgen();
+
+ Label* entry_label() { return &entry_label_; }
+
+ VirtualFrame* entry_frame() const { return entry_frame_; }
+ void set_entry_frame(VirtualFrame* frame) {
+ entry_frame_ = frame;
+ }
+
+ // Predicates testing the state of the encapsulated label.
+ bool is_bound() const { return entry_label_.is_bound(); }
+ bool is_linked() const {
+ return !is_bound() && !reaching_frames_.is_empty();
+ }
+ bool is_unused() const {
+ // This is !is_bound() && !is_linked().
+ return !is_bound() && reaching_frames_.is_empty();
+ }
+
+ // Emit a jump to the target. There must be a current frame at the
+ // jump and there will be no current frame after the jump.
+ virtual void Jump();
+ virtual void Jump(Result* arg);
+
+ // Emit a conditional branch to the target. There must be a current
+ // frame at the branch. The current frame will fall through to the
+ // code after the branch. The arg is a result that is live both at
+ // the target and the fall-through.
+ virtual void Branch(Condition cc, Hint hint = no_hint);
+ virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+ virtual void Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Hint hint = no_hint);
+
+ // Bind a jump target. If there is no current frame at the binding
+ // site, there must be at least one frame reaching via a forward
+ // jump.
+ virtual void Bind();
+ virtual void Bind(Result* arg);
+ virtual void Bind(Result* arg0, Result* arg1);
+
+ // Emit a call to a jump target. There must be a current frame at
+ // the call. The frame at the target is the same as the current
+ // frame except for an extra return address on top of it. The frame
+ // after the call is the same as the frame before the call.
+ void Call();
+
+ static void set_compiling_deferred_code(bool flag) {
+ compiling_deferred_code_ = flag;
+ }
+
+ protected:
+ // Directionality flag set at initialization time.
+ Directionality direction_;
+
+ // A list of frames reaching this block via forward jumps.
+ ZoneList<VirtualFrame*> reaching_frames_;
+
+ // A parallel list of labels for merge code.
+ ZoneList<Label> merge_labels_;
+
+ // The frame used on entry to the block and expected at backward
+ // jumps to the block. Set when the jump target is bound, but may
+ // or may not be set for forward-only blocks.
+ VirtualFrame* entry_frame_;
+
+ // The actual entry label of the block.
+ Label entry_label_;
+
+ // Implementations of Jump, Branch, and Bind with all arguments and
+ // return values using the virtual frame.
+ void DoJump();
+ void DoBranch(Condition cc, Hint hint);
+ void DoBind();
+
+ private:
+ static bool compiling_deferred_code_;
+
+ // Add a virtual frame reaching this labeled block via a forward jump,
+ // and a corresponding merge code label.
+ void AddReachingFrame(VirtualFrame* frame);
+
+ // Perform initialization required during entry frame computation
+ // after setting the virtual frame element at index in frame to be
+ // target.
+ inline void InitializeEntryElement(int index, FrameElement* target);
+
+ // Compute a frame to use for entry to this block.
+ void ComputeEntryFrame();
+
+ DISALLOW_COPY_AND_ASSIGN(JumpTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Break targets
+//
+// A break target is a jump target that can be used to break out of a
+// statement that keeps extra state on the stack (eg, for/in or
+// try/finally). They know the expected stack height at the target
+// and will drop state from nested statements as part of merging.
+//
+// Break targets are used for return, break, and continue targets.
+
+class BreakTarget : public JumpTarget {
+ public:
+ // Construct a break target.
+ BreakTarget() {}
+
+ virtual ~BreakTarget() {}
+
+ // Set the direction of the break target.
+ virtual void set_direction(Directionality direction);
+
+ // Copy the state of this break target to the destination. The
+ // lists of forward-reaching frames and merge-point labels are
+ // copied. All virtual frame pointers are copied, not the
+ // pointed-to frames. The previous state of the destination is
+ // overwritten, without deallocating pointed-to virtual frames.
+ void CopyTo(BreakTarget* destination);
+
+ // Emit a jump to the target. There must be a current frame at the
+ // jump and there will be no current frame after the jump.
+ virtual void Jump();
+ virtual void Jump(Result* arg);
+
+ // Emit a conditional branch to the target. There must be a current
+ // frame at the branch. The current frame will fall through to the
+ // code after the branch.
+ virtual void Branch(Condition cc, Hint hint = no_hint);
+ virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+
+ // Bind a break target. If there is no current frame at the binding
+ // site, there must be at least one frame reaching via a forward
+ // jump.
+ virtual void Bind();
+ virtual void Bind(Result* arg);
+
+ // Setter for expected height.
+ void set_expected_height(int expected) { expected_height_ = expected; }
+
+ private:
+ // The expected height of the expression stack where the target will
+ // be bound, statically known at initialization time.
+ int expected_height_;
+
+ DISALLOW_COPY_AND_ASSIGN(BreakTarget);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_JUMP_TARGET_HEAVY_H_
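The header above documents the heavy JumpTarget as a basic-block entry that records every forward-reaching virtual frame and emits merge code when bound. As a rough usage sketch, not runnable on its own, assuming a code-generator context in which a preceding test has already set the condition codes:

    // Hypothetical fragment in the style of the IA-32 code generator: lowering
    // `if (cond) { A } else { B }` with forward-only jump targets.
    void GenerateIfElseSketch() {
      JumpTarget else_part;
      JumpTarget done;
      // ... emit a test of the condition, setting the processor flags ...
      else_part.Branch(not_equal);  // forward edge; the current frame is recorded
      // ... emit code for A ...
      done.Jump();                  // leave the block; no current frame afterwards
      else_part.Bind();             // merge all reaching frames to an entry frame
      // ... emit code for B ...
      done.Bind();                  // join point for the two paths
    }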
diff --git a/deps/v8/src/jump-target-light-inl.h b/deps/v8/src/jump-target-light-inl.h
index 8d6c3ac516..0b4eee40c8 100644
--- a/deps/v8/src/jump-target-light-inl.h
+++ b/deps/v8/src/jump-target-light-inl.h
@@ -33,10 +33,20 @@
namespace v8 {
namespace internal {
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
- UNIMPLEMENTED();
+// Construct a jump target.
+JumpTarget::JumpTarget(Directionality direction)
+ : entry_frame_set_(false),
+ entry_frame_(kInvalidVirtualFrameInitializer) {
}
+JumpTarget::JumpTarget()
+ : entry_frame_set_(false),
+ entry_frame_(kInvalidVirtualFrameInitializer) {
+}
+
+
+BreakTarget::BreakTarget() { }
+
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/deps/v8/src/jump-target-light.cc b/deps/v8/src/jump-target-light.cc
index befb430736..76c3cb7f6b 100644
--- a/deps/v8/src/jump-target-light.cc
+++ b/deps/v8/src/jump-target-light.cc
@@ -34,53 +34,76 @@ namespace v8 {
namespace internal {
-void JumpTarget::Jump(Result* arg) {
- UNIMPLEMENTED();
-}
+DeferredCode::DeferredCode()
+ : masm_(CodeGeneratorScope::Current()->masm()),
+ statement_position_(masm_->current_statement_position()),
+ position_(masm_->current_position()) {
+ ASSERT(statement_position_ != RelocInfo::kNoPosition);
+ ASSERT(position_ != RelocInfo::kNoPosition);
+ CodeGeneratorScope::Current()->AddDeferred(this);
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
- UNIMPLEMENTED();
+#ifdef DEBUG
+ CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
+#endif
}
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
- UNIMPLEMENTED();
-}
+// -------------------------------------------------------------------------
+// BreakTarget implementation.
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
- UNIMPLEMENTED();
+void BreakTarget::SetExpectedHeight() {
+ expected_height_ = cgen()->frame()->height();
}
-void JumpTarget::Bind(Result* arg) {
- UNIMPLEMENTED();
-}
-
+void BreakTarget::Jump() {
+ ASSERT(cgen()->has_valid_frame());
-void JumpTarget::Bind(Result* arg0, Result* arg1) {
- UNIMPLEMENTED();
+ int count = cgen()->frame()->height() - expected_height_;
+ if (count > 0) {
+ cgen()->frame()->Drop(count);
+ }
+ DoJump();
}
-void JumpTarget::ComputeEntryFrame() {
- UNIMPLEMENTED();
+void BreakTarget::Branch(Condition cc, Hint hint) {
+ if (cc == al) {
+ Jump();
+ return;
+ }
+
+ ASSERT(cgen()->has_valid_frame());
+
+ int count = cgen()->frame()->height() - expected_height_;
+ if (count > 0) {
+ // We negate and branch here rather than using DoBranch's negate
+ // and branch. This gives us a hook to remove statement state
+ // from the frame.
+ JumpTarget fall_through;
+ // Branch to fall through will not negate, because it is a
+ // forward-only target.
+ fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+ // Emit merge code.
+ cgen()->frame()->Drop(count);
+ DoJump();
+ fall_through.Bind();
+ } else {
+ DoBranch(cc, hint);
+ }
}
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
- statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current()->AddDeferred(this);
-
-#ifdef DEBUG
- CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
-#endif
+void BreakTarget::Bind() {
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ if (count > 0) {
+ cgen()->frame()->Drop(count);
+ }
+ }
+ DoBind();
}
} } // namespace v8::internal
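Both BreakTarget::Jump variants (heavy and light) share one idea: before transferring control to a target bound at a lower expression-stack height, drop the extra statement state; the conditional Branch does the same on a negated fall-through path so the not-taken case keeps its frame intact. A small standalone model of that height adjustment (toy types, not V8's):

    #include <cassert>
    #include <vector>

    struct ToyFrame {
      std::vector<int> elements;
      int height() const { return static_cast<int>(elements.size()); }
      void Drop(int count) { elements.resize(elements.size() - count); }
    };

    // Mirrors BreakTarget::Jump: drop down to the expected height, then jump.
    void JumpToBreakTarget(ToyFrame* frame, int expected_height) {
      int count = frame->height() - expected_height;
      if (count > 0) frame->Drop(count);  // merge code: remove statement state
      // ... DoJump() would emit the actual jmp here ...
    }

    int main() {
      ToyFrame frame{{1, 2, 3, 4, 5}};    // e.g. inside a for/in with extra state
      JumpToBreakTarget(&frame, 3);       // break target expects height 3
      assert(frame.height() == 3);
      return 0;
    }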
diff --git a/deps/v8/src/jump-target-light.h b/deps/v8/src/jump-target-light.h
new file mode 100644
index 0000000000..656ec75651
--- /dev/null
+++ b/deps/v8/src/jump-target-light.h
@@ -0,0 +1,187 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_LIGHT_H_
+#define V8_JUMP_TARGET_LIGHT_H_
+
+#include "macro-assembler.h"
+#include "zone-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class FrameElement;
+class Result;
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code. It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths. When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame. For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
+
+class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
+ public:
+ // Forward-only jump targets can only be reached by forward CFG edges.
+ enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+ // Construct a jump target.
+ explicit inline JumpTarget(Directionality direction);
+
+ inline JumpTarget();
+
+ virtual ~JumpTarget() {}
+
+ void Unuse() {
+ entry_frame_set_ = false;
+ entry_label_.Unuse();
+ }
+
+ inline CodeGenerator* cgen();
+
+ const VirtualFrame* entry_frame() const {
+ return entry_frame_set_ ? &entry_frame_ : NULL;
+ }
+
+ void set_entry_frame(VirtualFrame* frame) {
+ entry_frame_ = *frame;
+ entry_frame_set_ = true;
+ }
+
+ // Predicates testing the state of the encapsulated label.
+ bool is_bound() const { return entry_label_.is_bound(); }
+ bool is_linked() const { return entry_label_.is_linked(); }
+ bool is_unused() const { return entry_label_.is_unused(); }
+
+ // Copy the state of this jump target to the destination.
+ inline void CopyTo(JumpTarget* destination) {
+ *destination = *this;
+ }
+
+ // Emit a jump to the target. There must be a current frame at the
+ // jump and there will be no current frame after the jump.
+ virtual void Jump();
+
+ // Emit a conditional branch to the target. There must be a current
+ // frame at the branch. The current frame will fall through to the
+ // code after the branch. The arg is a result that is live both at
+ // the target and the fall-through.
+ virtual void Branch(Condition cc, Hint hint = no_hint);
+
+ // Bind a jump target. If there is no current frame at the binding
+ // site, there must be at least one frame reaching via a forward
+ // jump.
+ virtual void Bind();
+
+ // Emit a call to a jump target. There must be a current frame at
+ // the call. The frame at the target is the same as the current
+ // frame except for an extra return address on top of it. The frame
+ // after the call is the same as the frame before the call.
+ void Call();
+
+ protected:
+ // Has an entry frame been found?
+ bool entry_frame_set_;
+
+ // The frame used on entry to the block and expected at backward
+ // jumps to the block. Set the first time something branches to this
+ // jump target.
+ VirtualFrame entry_frame_;
+
+ // The actual entry label of the block.
+ Label entry_label_;
+
+ // Implementations of Jump, Branch, and Bind with all arguments and
+ // return values using the virtual frame.
+ void DoJump();
+ void DoBranch(Condition cc, Hint hint);
+ void DoBind();
+};
+
+
+// -------------------------------------------------------------------------
+// Break targets
+//
+// A break target is a jump target that can be used to break out of a
+// statement that keeps extra state on the stack (eg, for/in or
+// try/finally). They know the expected stack height at the target
+// and will drop state from nested statements as part of merging.
+//
+// Break targets are used for return, break, and continue targets.
+
+class BreakTarget : public JumpTarget {
+ public:
+ // Construct a break target.
+ inline BreakTarget();
+
+ virtual ~BreakTarget() {}
+
+ // Copy the state of this jump target to the destination.
+ inline void CopyTo(BreakTarget* destination) {
+ *destination = *this;
+ }
+
+ // Emit a jump to the target. There must be a current frame at the
+ // jump and there will be no current frame after the jump.
+ virtual void Jump();
+
+ // Emit a conditional branch to the target. There must be a current
+ // frame at the branch. The current frame will fall through to the
+ // code after the branch.
+ virtual void Branch(Condition cc, Hint hint = no_hint);
+
+ // Bind a break target. If there is no current frame at the binding
+ // site, there must be at least one frame reaching via a forward
+ // jump.
+ virtual void Bind();
+
+ // Setter for expected height.
+ void set_expected_height(int expected) { expected_height_ = expected; }
+
+ // Uses the current frame to set the expected height.
+ void SetExpectedHeight();
+
+ private:
+ // The expected height of the expression stack where the target will
+ // be bound, statically known at initialization time.
+ int expected_height_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_JUMP_TARGET_LIGHT_H_
diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/jump-target.cc
index 8b2999549e..72aada8abf 100644
--- a/deps/v8/src/jump-target.cc
+++ b/deps/v8/src/jump-target.cc
@@ -37,17 +37,6 @@ namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
-bool JumpTarget::compiling_deferred_code_ = false;
-
-
-void JumpTarget::Unuse() {
- reaching_frames_.Clear();
- merge_labels_.Clear();
- entry_frame_ = NULL;
- entry_label_.Unuse();
-}
-
-
void JumpTarget::Jump() {
DoJump();
}
@@ -63,58 +52,6 @@ void JumpTarget::Bind() {
}
-void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
- ASSERT(reaching_frames_.length() == merge_labels_.length());
- ASSERT(entry_frame_ == NULL);
- Label fresh;
- merge_labels_.Add(fresh);
- reaching_frames_.Add(frame);
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-void BreakTarget::set_direction(Directionality direction) {
- JumpTarget::set_direction(direction);
- ASSERT(cgen()->has_valid_frame());
- expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::CopyTo(BreakTarget* destination) {
- ASSERT(destination != NULL);
- destination->direction_ = direction_;
- destination->reaching_frames_.Rewind(0);
- destination->reaching_frames_.AddAll(reaching_frames_);
- destination->merge_labels_.Rewind(0);
- destination->merge_labels_.AddAll(merge_labels_);
- destination->entry_frame_ = entry_frame_;
- destination->entry_label_ = entry_label_;
- destination->expected_height_ = expected_height_;
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- Jump(); // May emit merge code here.
- fall_through.Bind();
- } else {
- DoBranch(cc, hint);
- }
-}
-
-
// -------------------------------------------------------------------------
// ShadowTarget implementation.
@@ -151,5 +88,4 @@ void ShadowTarget::StopShadowing() {
#endif
}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/jump-target.h b/deps/v8/src/jump-target.h
index db523b55ba..a0d2686b0f 100644
--- a/deps/v8/src/jump-target.h
+++ b/deps/v8/src/jump-target.h
@@ -28,216 +28,21 @@
#ifndef V8_JUMP_TARGET_H_
#define V8_JUMP_TARGET_H_
-#include "macro-assembler.h"
-#include "zone-inl.h"
+#if V8_TARGET_ARCH_IA32
+#include "jump-target-heavy.h"
+#elif V8_TARGET_ARCH_X64
+#include "jump-target-heavy.h"
+#elif V8_TARGET_ARCH_ARM
+#include "jump-target-light.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "jump-target-light.h"
+#else
+#error Unsupported target architecture.
+#endif
namespace v8 {
namespace internal {
-// Forward declarations.
-class FrameElement;
-class Result;
-class VirtualFrame;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code. It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths. When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame. For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
- public:
- // Forward-only jump targets can only be reached by forward CFG edges.
- enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
- // Construct a jump target used to generate code and to provide
- // access to a current frame.
- explicit JumpTarget(Directionality direction)
- : direction_(direction),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL) {
- }
-
- // Construct a jump target.
- JumpTarget()
- : direction_(FORWARD_ONLY),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL) {
- }
-
- virtual ~JumpTarget() {}
-
- // Set the direction of the jump target.
- virtual void set_direction(Directionality direction) {
- direction_ = direction;
- }
-
- // Treat the jump target as a fresh one. The state is reset.
- void Unuse();
-
- inline CodeGenerator* cgen();
-
- Label* entry_label() { return &entry_label_; }
-
- VirtualFrame* entry_frame() const { return entry_frame_; }
- void set_entry_frame(VirtualFrame* frame) {
- entry_frame_ = frame;
- }
-
- // Predicates testing the state of the encapsulated label.
- bool is_bound() const { return entry_label_.is_bound(); }
- bool is_linked() const {
- return !is_bound() && !reaching_frames_.is_empty();
- }
- bool is_unused() const {
- // This is !is_bound() && !is_linked().
- return !is_bound() && reaching_frames_.is_empty();
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
- virtual void Jump(Result* arg);
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch. The arg is a result that is live both at
- // the target and the fall-through.
- virtual void Branch(Condition cc, Hint hint = no_hint);
- virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
- virtual void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Hint hint = no_hint);
-
- // Bind a jump target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
- virtual void Bind(Result* arg);
- virtual void Bind(Result* arg0, Result* arg1);
-
- // Emit a call to a jump target. There must be a current frame at
- // the call. The frame at the target is the same as the current
- // frame except for an extra return address on top of it. The frame
- // after the call is the same as the frame before the call.
- void Call();
-
- static void set_compiling_deferred_code(bool flag) {
- compiling_deferred_code_ = flag;
- }
-
- protected:
- // Directionality flag set at initialization time.
- Directionality direction_;
-
- // A list of frames reaching this block via forward jumps.
- ZoneList<VirtualFrame*> reaching_frames_;
-
- // A parallel list of labels for merge code.
- ZoneList<Label> merge_labels_;
-
- // The frame used on entry to the block and expected at backward
- // jumps to the block. Set when the jump target is bound, but may
- // or may not be set for forward-only blocks.
- VirtualFrame* entry_frame_;
-
- // The actual entry label of the block.
- Label entry_label_;
-
- // Implementations of Jump, Branch, and Bind with all arguments and
- // return values using the virtual frame.
- void DoJump();
- void DoBranch(Condition cc, Hint hint);
- void DoBind();
-
- private:
- static bool compiling_deferred_code_;
-
- // Add a virtual frame reaching this labeled block via a forward jump,
- // and a corresponding merge code label.
- void AddReachingFrame(VirtualFrame* frame);
-
- // Perform initialization required during entry frame computation
- // after setting the virtual frame element at index in frame to be
- // target.
- inline void InitializeEntryElement(int index, FrameElement* target);
-
- // Compute a frame to use for entry to this block.
- void ComputeEntryFrame();
-
- DISALLOW_COPY_AND_ASSIGN(JumpTarget);
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally). They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
- // Construct a break target.
- BreakTarget() {}
-
- virtual ~BreakTarget() {}
-
- // Set the direction of the break target.
- virtual void set_direction(Directionality direction);
-
- // Copy the state of this break target to the destination. The
- // lists of forward-reaching frames and merge-point labels are
- // copied. All virtual frame pointers are copied, not the
- // pointed-to frames. The previous state of the destination is
- // overwritten, without deallocating pointed-to virtual frames.
- void CopyTo(BreakTarget* destination);
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
- virtual void Jump(Result* arg);
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
- virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-
- // Bind a break target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
- virtual void Bind(Result* arg);
-
- // Setter for expected height.
- void set_expected_height(int expected) { expected_height_ = expected; }
-
- private:
- // The expected height of the expression stack where the target will
- // be bound, statically known at initialization time.
- int expected_height_;
-
- DISALLOW_COPY_AND_ASSIGN(BreakTarget);
-};
-
-
// -------------------------------------------------------------------------
// Shadow break targets
//
@@ -280,7 +85,6 @@ class ShadowTarget : public BreakTarget {
DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
};
-
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_H_
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 592ef49906..b14d3d82ce 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -988,7 +988,7 @@ class RelocInfoBuffer {
byte* buffer_;
int buffer_size_;
- static const int kBufferGap = 8;
+ static const int kBufferGap = RelocInfoWriter::kMaxSize;
static const int kMaximalBufferSize = 512*MB;
};
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 891b0e2b82..f48b358986 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -170,7 +170,7 @@ void StackTracer::Trace(TickSample* sample) {
SafeStackTraceFrameIterator it(sample->fp, sample->sp,
sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
- sample->stack[i++] = it.frame()->pc();
+ sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
it.Advance();
}
sample->frames_count = i;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index a21e9604c2..686a61c367 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -68,13 +68,8 @@ const int kInvalidProtoDepth = -1;
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2.h"
-#include "arm/assembler-thumb2-inl.h"
-#else
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
-#endif
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index d6ba2ca123..153374131e 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -112,6 +112,11 @@ macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
+# Macro for ECMAScript 5 queries of the type:
+# "Type(O) is object."
+# This is the same as being either a function or an object in V8 terminology.
+macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
+
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index e3cc6ab6cb..554b5795de 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -78,6 +78,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepLargeObjectSpace();
if (IsCompacting()) {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
UpdatePointers();
@@ -678,6 +679,7 @@ void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
void MarkCompactCollector::MarkLiveObjects() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
@@ -1163,6 +1165,8 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
Heap::UpdateRSet(target);
+ MarkCompactCollector::tracer()->
+ increment_promoted_objects_size(object_size);
return true;
}
} else {
@@ -1177,6 +1181,8 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
if (target_space == Heap::old_pointer_space()) {
Heap::UpdateRSet(target);
}
+ MarkCompactCollector::tracer()->
+ increment_promoted_objects_size(object_size);
return true;
}
}
@@ -1735,6 +1741,8 @@ MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
void MarkCompactCollector::SweepSpaces() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
+
ASSERT(state_ == SWEEP_SPACES);
ASSERT(!IsCompacting());
// Noncompacting collections simply sweep the spaces to clear the mark
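The GCTracer::Scope objects added above attribute the time spent in the marking, sweeping, and compacting phases to per-phase counters purely by construction and destruction. A minimal sketch of that RAII pattern with illustrative names (not the V8 GCTracer implementation):

    #include <chrono>

    class PhaseTimer {
     public:
      enum Phase { MC_MARK, MC_SWEEP, MC_COMPACT, NUM_PHASES };
      PhaseTimer(double* totals_ms, Phase phase)
          : totals_ms_(totals_ms),
            phase_(phase),
            start_(std::chrono::steady_clock::now()) {}
      // On scope exit, charge the elapsed time to the phase's counter.
      ~PhaseTimer() {
        std::chrono::duration<double, std::milli> elapsed =
            std::chrono::steady_clock::now() - start_;
        totals_ms_[phase_] += elapsed.count();
      }
     private:
      double* totals_ms_;
      Phase phase_;
      std::chrono::steady_clock::time_point start_;
    };

    int main() {
      double totals_ms[PhaseTimer::NUM_PHASES] = {0};
      {
        PhaseTimer timer(totals_ms, PhaseTimer::MC_MARK);  // like GCTracer::Scope
        // ... work for the marking phase ...
      }                                                    // destructor records the time
      return 0;
    }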
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 4a91624ed9..d9617dc7d3 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -34,6 +34,9 @@
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
@@ -1206,3 +1209,4 @@ void Assembler::set_target_address_at(Address pc, Address target) {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 04bcfeb04c..26fea25153 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -29,6 +29,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
@@ -200,3 +202,4 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index ca1edd46b8..f8b88d7a27 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -28,6 +28,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
@@ -1426,3 +1428,5 @@ int CompareStub::MinorKey() {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index a5ef9f8e6b..49502bdec7 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "constants-mips.h"
namespace assembler {
@@ -321,3 +324,5 @@ Instruction::Type Instruction::InstructionType() const {
}
} } // namespace assembler::mips
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index f592257e04..659fc01ce0 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -35,6 +35,9 @@
#endif // #ifdef __mips
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "cpu.h"
namespace v8 {
@@ -67,3 +70,4 @@ void CPU::DebugBreak() {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index cdb35ae3d0..8c40930c59 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -29,6 +29,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "debug.h"
@@ -126,3 +128,4 @@ const int Debug::kFrameDropperFrameSize = -1;
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index cab72d1db4..959a4a2206 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -57,6 +57,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
@@ -782,3 +784,4 @@ void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
} // namespace disasm
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/fast-codegen-mips.cc b/deps/v8/src/mips/fast-codegen-mips.cc
index 48a0ce6c75..186f9fadb6 100644
--- a/deps/v8/src/mips/fast-codegen-mips.cc
+++ b/deps/v8/src/mips/fast-codegen-mips.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "fast-codegen.h"
@@ -72,3 +74,4 @@ void FastCodeGenerator::EmitBitOr() {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index cdc880dcd3..0fce3cdd95 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -28,6 +28,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "frames-inl.h"
#include "mips/assembler-mips-inl.h"
@@ -97,3 +99,4 @@ Address InternalFrame::GetCallerStackPointer() const {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 3c29e99be9..afda2cbb21 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -271,3 +273,5 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 8c9092124c..519fe624b9 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -29,6 +29,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "ic-inl.h"
#include "runtime.h"
@@ -215,3 +217,4 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/jump-target-mips.cc b/deps/v8/src/mips/jump-target-mips.cc
index 4bd91028a7..408f75e79a 100644
--- a/deps/v8/src/mips/jump-target-mips.cc
+++ b/deps/v8/src/mips/jump-target-mips.cc
@@ -28,6 +28,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
@@ -170,3 +172,4 @@ void BreakTarget::Bind(Result* arg) {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c276af5106..e096028e38 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -29,6 +29,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
@@ -1321,3 +1323,4 @@ void MacroAssembler::AlignStack(int offset) {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/register-allocator-mips.cc b/deps/v8/src/mips/register-allocator-mips.cc
index f48d3a6559..2c5d61bee0 100644
--- a/deps/v8/src/mips/register-allocator-mips.cc
+++ b/deps/v8/src/mips/register-allocator-mips.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
@@ -58,3 +60,4 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index bdb3b7f3bc..886b9e4faa 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -29,6 +29,8 @@
#include <cstdarg>
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "disasm.h"
#include "assembler.h"
#include "globals.h" // Need the BitCast
@@ -1646,3 +1648,4 @@ uintptr_t Simulator::PopAddress() {
#endif // __mips
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 0b2d2c3333..faaacbc4db 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
@@ -398,3 +400,4 @@ Object* ConstructStubCompiler::CompileConstructStub(
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/virtual-frame-mips.cc b/deps/v8/src/mips/virtual-frame-mips.cc
index c2116de77a..b61ce75bd9 100644
--- a/deps/v8/src/mips/virtual-frame-mips.cc
+++ b/deps/v8/src/mips/virtual-frame-mips.cc
@@ -29,6 +29,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_MIPS)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@@ -314,3 +316,4 @@ void VirtualFrame::EmitArgumentSlots(RegList reglist) {
} } // namespace v8::internal
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index ad15104152..d82d73ec50 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -1691,13 +1691,19 @@ bool String::Equals(String* other) {
Object* String::TryFlatten(PretenureFlag pretenure) {
- // We don't need to flatten strings that are already flat. Since this code
- // is inlined, it can be helpful in the flat case to not call out to Flatten.
- if (IsFlat()) return this;
+ if (!StringShape(this).IsCons()) return this;
+ ConsString* cons = ConsString::cast(this);
+ if (cons->second()->length() == 0) return cons->first();
return SlowTryFlatten(pretenure);
}
+String* String::TryFlattenGetString(PretenureFlag pretenure) {
+ Object* flat = TryFlatten(pretenure);
+ return flat->IsFailure() ? this : String::cast(flat);
+}
+
+
uint16_t String::Get(int index) {
ASSERT(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
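The TryFlatten change above makes flattening a cons string whose second half is empty return the first child instead of the cons wrapper, and the new TryFlattenGetString folds an allocation failure back to the receiver so callers always get a String. A standalone model of that behaviour with plain C++ types (not V8's object model):

    #include <cassert>
    #include <memory>
    #include <string>

    struct Str {
      std::string flat;                    // used when the node is already flat
      std::shared_ptr<Str> first, second;  // both set => cons string
      bool IsCons() const { return first != nullptr; }
    };

    // Returns the receiver for flat strings, the first child for a degenerate
    // cons ("abc" + ""), and otherwise a freshly flattened copy.
    std::shared_ptr<Str> TryFlatten(const std::shared_ptr<Str>& s) {
      if (!s->IsCons()) return s;
      if (!s->second->IsCons() && s->second->flat.empty()) return s->first;
      auto result = std::make_shared<Str>();           // SlowTryFlatten analogue
      // (assumes the children are already flat; the real code handles nesting)
      result->flat = s->first->flat + s->second->flat;
      return result;
    }

    int main() {
      auto abc = std::make_shared<Str>();
      abc->flat = "abc";
      auto empty = std::make_shared<Str>();
      auto cons = std::make_shared<Str>();
      cons->first = abc;
      cons->second = empty;
      assert(TryFlatten(cons) == abc);  // first(), not the cons node itself
      return 0;
    }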
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index c8acb47071..360eb28fb1 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -631,7 +631,7 @@ Object* String::SlowTryFlatten(PretenureFlag pretenure) {
case kConsStringTag: {
ConsString* cs = ConsString::cast(this);
if (cs->second()->length() == 0) {
- return this;
+ return cs->first();
}
// There's little point in putting the flat string in new space if the
// cons string is in old space. It can never get GCed until there is
@@ -669,7 +669,7 @@ Object* String::SlowTryFlatten(PretenureFlag pretenure) {
}
cs->set_first(result);
cs->set_second(Heap::empty_string());
- return this;
+ return result;
}
default:
return this;
@@ -4580,51 +4580,58 @@ bool String::SlowEquals(String* other) {
if (Hash() != other->Hash()) return false;
}
- if (StringShape(this).IsSequentialAscii() &&
- StringShape(other).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(this)->GetChars();
- const char* str2 = SeqAsciiString::cast(other)->GetChars();
+ // We know the strings are both non-empty. Compare the first chars
+ // before we try to flatten the strings.
+ if (this->Get(0) != other->Get(0)) return false;
+
+ String* lhs = this->TryFlattenGetString();
+ String* rhs = other->TryFlattenGetString();
+
+ if (StringShape(lhs).IsSequentialAscii() &&
+ StringShape(rhs).IsSequentialAscii()) {
+ const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
+ const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
return CompareRawStringContents(Vector<const char>(str1, len),
Vector<const char>(str2, len));
}
- if (this->IsFlat()) {
+ if (lhs->IsFlat()) {
if (IsAsciiRepresentation()) {
- Vector<const char> vec1 = this->ToAsciiVector();
- if (other->IsFlat()) {
- if (other->IsAsciiRepresentation()) {
- Vector<const char> vec2 = other->ToAsciiVector();
+ Vector<const char> vec1 = lhs->ToAsciiVector();
+ if (rhs->IsFlat()) {
+ if (rhs->IsAsciiRepresentation()) {
+ Vector<const char> vec2 = rhs->ToAsciiVector();
return CompareRawStringContents(vec1, vec2);
} else {
VectorIterator<char> buf1(vec1);
- VectorIterator<uc16> ib(other->ToUC16Vector());
+ VectorIterator<uc16> ib(rhs->ToUC16Vector());
return CompareStringContents(&buf1, &ib);
}
} else {
VectorIterator<char> buf1(vec1);
- string_compare_buffer_b.Reset(0, other);
+ string_compare_buffer_b.Reset(0, rhs);
return CompareStringContents(&buf1, &string_compare_buffer_b);
}
} else {
- Vector<const uc16> vec1 = this->ToUC16Vector();
- if (other->IsFlat()) {
- if (other->IsAsciiRepresentation()) {
+ Vector<const uc16> vec1 = lhs->ToUC16Vector();
+ if (rhs->IsFlat()) {
+ if (rhs->IsAsciiRepresentation()) {
VectorIterator<uc16> buf1(vec1);
- VectorIterator<char> ib(other->ToAsciiVector());
+ VectorIterator<char> ib(rhs->ToAsciiVector());
return CompareStringContents(&buf1, &ib);
} else {
- Vector<const uc16> vec2(other->ToUC16Vector());
+ Vector<const uc16> vec2(rhs->ToUC16Vector());
return CompareRawStringContents(vec1, vec2);
}
} else {
VectorIterator<uc16> buf1(vec1);
- string_compare_buffer_b.Reset(0, other);
+ string_compare_buffer_b.Reset(0, rhs);
return CompareStringContents(&buf1, &string_compare_buffer_b);
}
}
} else {
- string_compare_buffer_a.Reset(0, this);
- return CompareStringContentsPartial(&string_compare_buffer_a, other);
+ string_compare_buffer_a.Reset(0, lhs);
+ return CompareStringContentsPartial(&string_compare_buffer_a, rhs);
}
}
@@ -7038,15 +7045,9 @@ class SymbolKey : public HashTableKey {
}
Object* AsObject() {
- // If the string is a cons string, attempt to flatten it so that
- // symbols will most often be flat strings.
- if (StringShape(string_).IsCons()) {
- ConsString* cons_string = ConsString::cast(string_);
- cons_string->TryFlatten();
- if (cons_string->second()->length() == 0) {
- string_ = cons_string->first();
- }
- }
+ // Attempt to flatten the string, so that symbols will most often
+ // be flat strings.
+ string_ = string_->TryFlattenGetString();
// Transform string to symbol if possible.
Map* map = Heap::SymbolMapForString(string_);
if (map != NULL) {
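
For readers tracing the SlowEquals rework above, the new strategy is: length and hash are already known to match, so compare the first characters cheaply, flatten both operands once, and only then pick the content comparison. A minimal standalone sketch of that ordering (simplified stand-in type, not part of this patch):

// equals_order_sketch.cc -- illustrates the comparison order only; the real
// code additionally dispatches on ASCII/two-byte and flat/non-flat shapes.
#include <cassert>
#include <string>

struct Str {
  std::string data;                              // pretend this may be a cons string
  char Get(size_t i) const { return data[i]; }
  const Str& Flatten() const { return *this; }   // no-op stand-in
};

bool SlowEqualsSketch(const Str& a, const Str& b) {
  if (a.data.size() != b.data.size()) return false;  // length checked earlier in V8
  if (a.data.empty()) return true;
  if (a.Get(0) != b.Get(0)) return false;             // cheap early-out before flattening
  const Str& lhs = a.Flatten();
  const Str& rhs = b.Flatten();
  return lhs.data == rhs.data;                        // raw sequential compare
}

int main() {
  assert(SlowEqualsSketch({"abc"}, {"abc"}));
  assert(!SlowEqualsSketch({"abc"}, {"xbc"}));
}
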
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 8b114a64ff..7f9c2a03db 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -4001,17 +4001,28 @@ class String: public HeapObject {
// to this method are not efficient unless the string is flat.
inline uint16_t Get(int index);
- // Try to flatten the top level ConsString that is hiding behind this
- // string. This is a no-op unless the string is a ConsString. Flatten
- // mutates the ConsString and might return a failure.
- Object* SlowTryFlatten(PretenureFlag pretenure);
-
- // Try to flatten the string. Checks first inline to see if it is necessary.
- // Do not handle allocation failures. After calling TryFlatten, the
- // string could still be a ConsString, in which case a failure is returned.
- // Use FlattenString from Handles.cc to be sure to flatten.
+ // Try to flatten the string. Checks first inline to see if it is
+ // necessary. Does nothing if the string is not a cons string.
+ // Flattening allocates a sequential string with the same data as
+ // the given string and mutates the cons string to a degenerate
+ // form, where the first component is the new sequential string and
+ // the second component is the empty string. If allocation fails,
+ // this function returns a failure. If flattening succeeds, this
+ // function returns the sequential string that is now the first
+ // component of the cons string.
+ //
+ // Degenerate cons strings are handled specially by the garbage
+ // collector (see IsShortcutCandidate).
+ //
+ // Use FlattenString from Handles.cc to flatten even in case an
+ // allocation failure happens.
inline Object* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
+ // Convenience function. Has exactly the same behavior as
+ // TryFlatten(), except in the case of failure returns the original
+ // string.
+ inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED);
+
Vector<const char> ToAsciiVector();
Vector<const uc16> ToUC16Vector();
@@ -4197,6 +4208,11 @@ class String: public HeapObject {
unsigned max_chars);
private:
+ // Try to flatten the top level ConsString that is hiding behind this
+ // string. This is a no-op unless the string is a ConsString. Flatten
+ // mutates the ConsString and might return a failure.
+ Object* SlowTryFlatten(PretenureFlag pretenure);
+
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
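
The contract documented in the comment above is compact but easy to misread, so here is a minimal standalone model of it (hypothetical types, not part of this patch): flattening allocates a sequential string, rewrites the cons string into its degenerate form, and returns the new first component.

// flatten_contract_sketch.cc -- models TryFlatten's documented behavior with
// plain structs; the real version also has to deal with allocation failures.
#include <cassert>
#include <string>

struct FlatStr { std::string data; };
struct ConsStr {
  FlatStr* first;
  FlatStr* second;        // becomes the empty string in the degenerate form
};

FlatStr* TryFlattenSketch(ConsStr* cs, FlatStr* scratch, FlatStr* empty) {
  if (cs->second->data.empty()) return cs->first;      // already degenerate
  scratch->data = cs->first->data + cs->second->data;  // "allocate" the flat copy
  cs->first = scratch;                                  // mutate into degenerate form
  cs->second = empty;
  return scratch;                                       // like SlowTryFlatten returning result
}

int main() {
  FlatStr a{"foo"}, b{"bar"}, scratch, empty;
  ConsStr cs{&a, &b};
  FlatStr* flat = TryFlattenSketch(&cs, &scratch, &empty);
  assert(flat->data == "foobar");
  assert(cs.first == flat);
  assert(cs.second->data.empty());
}
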
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 089eeeea60..c482fdf5f9 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -33,12 +33,15 @@
#include "codegen.h"
#include "compiler.h"
#include "messages.h"
+#include "parser.h"
#include "platform.h"
#include "runtime.h"
-#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
+#include "ast-inl.h"
+#include "jump-target-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 16617cec10..0ae1ecf452 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -36,7 +36,7 @@
#include <unistd.h> // getpagesize(), usleep()
#include <sys/mman.h> // mmap()
#include <ucontext.h> // walkstack(), getcontext()
-#include <dlfcn.h> // dladdr1
+#include <dlfcn.h> // dladdr
#include <pthread.h>
#include <sched.h> // for sched_yield
#include <semaphore.h>
@@ -54,14 +54,19 @@
#include "platform.h"
+// It seems there is a bug in some Solaris distributions (experienced in
+// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
+// access signbit() despite the availability of other C99 math functions.
#ifndef signbit
// Test sign - usually defined in math.h
int signbit(double x) {
- // We need to take care of the special case of both positive
- // and negative versions of zero.
+ // We need to take care of the special case of both positive and negative
+ // versions of zero.
if (x == 0) {
return fpclass(x) & FP_NZERO;
} else {
+ // This won't detect negative NaN but that should be okay since we don't
+ // assume that behavior.
return x < 0;
}
}
@@ -245,19 +250,19 @@ void OS::LogSharedLibraryAddresses() {
}
-struct stack_walker {
- Vector<OS::StackFrame> &frames;
+struct StackWalker {
+ Vector<OS::StackFrame>& frames;
int index;
};
-static int StackWalkCallback(uintptr_t pc, int signo, void *data) {
- struct stack_walker * walker = static_cast<struct stack_walker *>(data);
+static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
+ struct StackWalker* walker = static_cast<struct StackWalker*>(data);
Dl_info info;
int i = walker->index;
- walker->frames[i].address = (void*)pc;
+ walker->frames[i].address = reinterpret_cast<void*>(pc);
// Make sure line termination is in place.
walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
@@ -265,17 +270,17 @@ static int StackWalkCallback(uintptr_t pc, int signo, void *data) {
Vector<char> text = MutableCStrVector(walker->frames[i].text,
OS::kStackWalkMaxTextLen);
- if (dladdr((void*)pc, &info) == 0) {
+ if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
OS::SNPrintF(text, "[0x%p]", pc);
} else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
- // we have containing symbol info
+ // We have symbol info.
OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
} else {
- // no local symbol info
+ // No local symbol info.
OS::SNPrintF(text,
"%s'0x%p [0x%p]",
info.dli_fname,
- (unsigned long)pc - (unsigned long)info.dli_fbase,
+ pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
pc);
}
walker->index++;
@@ -285,11 +290,11 @@ static int StackWalkCallback(uintptr_t pc, int signo, void *data) {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
ucontext_t ctx;
- struct stack_walker walker = { frames, 0 };
+ struct StackWalker walker = { frames, 0 };
if (getcontext(&ctx) < 0) return kStackWalkError;
- if (!walkcontext(&ctx, StackWalkCallback, (void*)(&walker))) {
+ if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
return kStackWalkError;
}
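
One more note on the signbit() fallback above: the x == 0 branch exists because IEEE negative zero compares equal to zero, so the sign must be read from the classification instead of from a comparison. A quick standalone check of that property using standard C++ (not the Solaris-specific fpclass()/FP_NZERO path from the patch):

// signbit_check.cc -- sanity check of the -0.0 property the fallback relies on.
#include <cassert>
#include <cmath>

int main() {
  assert(std::signbit(-0.0));        // negative zero has the sign bit set
  assert(!std::signbit(0.0));
  assert(std::signbit(-1.5));
  assert(!std::signbit(1.5));
  // Like the patch's fallback, a plain "x < 0" test cannot see -0.0:
  assert(!(-0.0 < 0.0));
}
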
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index f89b34241d..606e5b46e6 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -83,8 +83,9 @@ int random();
#endif // WIN32
+
#ifdef __sun
-# if !defined(signbit)
+# ifndef signbit
int signbit(double x);
# endif
#endif
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 628fa4494e..fecb70b775 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -35,17 +35,30 @@
namespace v8 {
namespace internal {
+CodeEntry::CodeEntry(int security_token_id)
+ : call_uid_(0),
+ tag_(Logger::FUNCTION_TAG),
+ name_prefix_(kEmptyNamePrefix),
+ name_(""),
+ resource_name_(""),
+ line_number_(0),
+ security_token_id_(security_token_id) {
+}
+
+
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
const char* name,
const char* resource_name,
- int line_number)
+ int line_number,
+ int security_token_id)
: call_uid_(next_call_uid_++),
tag_(tag),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
- line_number_(line_number) {
+ line_number_(line_number),
+ security_token_id_(security_token_id) {
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 4c2a330898..ad8867ced1 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -28,6 +28,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "v8.h"
+#include "global-handles.h"
#include "profile-generator-inl.h"
@@ -37,10 +38,68 @@ namespace v8 {
namespace internal {
+TokenEnumerator::TokenEnumerator()
+ : token_locations_(4),
+ token_removed_(4) {
+}
+
+
+TokenEnumerator::~TokenEnumerator() {
+ for (int i = 0; i < token_locations_.length(); ++i) {
+ if (!token_removed_[i]) {
+ GlobalHandles::ClearWeakness(token_locations_[i]);
+ GlobalHandles::Destroy(token_locations_[i]);
+ }
+ }
+}
+
+
+int TokenEnumerator::GetTokenId(Object* token) {
+ if (token == NULL) return CodeEntry::kNoSecurityToken;
+ for (int i = 0; i < token_locations_.length(); ++i) {
+ if (*token_locations_[i] == token && !token_removed_[i]) return i;
+ }
+ Handle<Object> handle = GlobalHandles::Create(token);
+ // handle.location() points to a memory cell holding a pointer
+  // to a token object in the V8 heap.
+ GlobalHandles::MakeWeak(handle.location(), this, TokenRemovedCallback);
+ token_locations_.Add(handle.location());
+ token_removed_.Add(false);
+ return token_locations_.length() - 1;
+}
+
+
+void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle,
+ void* parameter) {
+ reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
+ Utils::OpenHandle(*handle).location());
+}
+
+
+void TokenEnumerator::TokenRemoved(Object** token_location) {
+ for (int i = 0; i < token_locations_.length(); ++i) {
+ if (token_locations_[i] == token_location && !token_removed_[i]) {
+ token_removed_[i] = true;
+ return;
+ }
+ }
+}
+
+
const char* CodeEntry::kEmptyNamePrefix = "";
unsigned CodeEntry::next_call_uid_ = 1;
+void CodeEntry::CopyData(const CodeEntry& source) {
+ call_uid_ = source.call_uid_;
+ tag_ = source.tag_;
+ name_prefix_ = source.name_prefix_;
+ name_ = source.name_;
+ resource_name_ = source.resource_name_;
+ line_number_ = source.line_number_;
+}
+
+
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
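
The TokenEnumerator introduced above assigns small integer ids to security-token objects and uses weak global handles so that a collected token frees its slot. A standalone sketch of just the id-assignment policy, with the GC machinery stubbed out (not part of this patch):

// token_enumerator_sketch.cc -- id-assignment policy only; the real class also
// registers weak handles so collected tokens are marked removed automatically.
#include <cassert>
#include <vector>

class TokenEnumeratorSketch {
 public:
  static const int kNoSecurityToken = -1;
  int GetTokenId(const void* token) {
    if (token == nullptr) return kNoSecurityToken;
    for (size_t i = 0; i < tokens_.size(); ++i) {
      if (tokens_[i] == token && !removed_[i]) return static_cast<int>(i);
    }
    tokens_.push_back(token);
    removed_.push_back(false);
    return static_cast<int>(tokens_.size()) - 1;
  }
  // Stand-in for the weak-handle callback: mark the slot as dead.
  void TokenRemoved(const void* token) {
    for (size_t i = 0; i < tokens_.size(); ++i) {
      if (tokens_[i] == token && !removed_[i]) { removed_[i] = true; return; }
    }
  }
 private:
  std::vector<const void*> tokens_;
  std::vector<bool> removed_;
};

int main() {
  TokenEnumeratorSketch e;
  int a = 1, b = 2;
  assert(e.GetTokenId(nullptr) == TokenEnumeratorSketch::kNoSecurityToken);
  assert(e.GetTokenId(&a) == 0);
  assert(e.GetTokenId(&a) == 0);     // stable id while the token is live
  assert(e.GetTokenId(&b) == 1);
  e.TokenRemoved(&a);
  assert(e.GetTokenId(&a) == 2);     // dead slots are never matched again
}
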
@@ -73,11 +132,12 @@ double ProfileNode::GetTotalMillis() const {
void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s",
+ OS::Print("%5u %5u %*c %s%s [%d]",
total_ticks_, self_ticks_,
indent, ' ',
entry_->name_prefix(),
- entry_->name());
+ entry_->name(),
+ entry_->security_token_id());
if (entry_->resource_name()[0] != '\0')
OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
OS::Print("\n");
@@ -93,6 +153,8 @@ namespace {
class DeleteNodesCallback {
public:
+ void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
+
void AfterAllChildrenTraversed(ProfileNode* node) {
delete node;
}
@@ -104,14 +166,19 @@ class DeleteNodesCallback {
ProfileTree::ProfileTree()
- : root_entry_(Logger::FUNCTION_TAG, "", "(root)", "", 0),
+ : root_entry_(Logger::FUNCTION_TAG,
+ "",
+ "(root)",
+ "",
+ 0,
+ CodeEntry::kNoSecurityToken),
root_(new ProfileNode(this, &root_entry_)) {
}
ProfileTree::~ProfileTree() {
DeleteNodesCallback cb;
- TraverseDepthFirstPostOrder(&cb);
+ TraverseDepthFirst(&cb);
}
@@ -141,6 +208,70 @@ void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
}
+namespace {
+
+struct NodesPair {
+ NodesPair(ProfileNode* src, ProfileNode* dst)
+ : src(src), dst(dst) { }
+ ProfileNode* src;
+ ProfileNode* dst;
+};
+
+
+class FilteredCloneCallback {
+ public:
+ explicit FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
+ : stack_(10),
+ security_token_id_(security_token_id) {
+ stack_.Add(NodesPair(NULL, dst_root));
+ }
+
+ void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
+ if (IsTokenAcceptable(child->entry()->security_token_id(),
+ parent->entry()->security_token_id())) {
+ ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
+ clone->IncreaseSelfTicks(child->self_ticks());
+ stack_.Add(NodesPair(child, clone));
+ } else {
+ // Attribute ticks to parent node.
+ stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
+ }
+ }
+
+ void AfterAllChildrenTraversed(ProfileNode* parent) { }
+
+ void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
+ if (stack_.last().src == child) {
+ stack_.RemoveLast();
+ }
+ }
+
+ private:
+ bool IsTokenAcceptable(int token, int parent_token) {
+ if (token == CodeEntry::kNoSecurityToken
+ || token == security_token_id_) return true;
+ if (token == CodeEntry::kInheritsSecurityToken) {
+ ASSERT(parent_token != CodeEntry::kInheritsSecurityToken);
+ return parent_token == CodeEntry::kNoSecurityToken
+ || parent_token == security_token_id_;
+ }
+ return false;
+ }
+
+ List<NodesPair> stack_;
+ int security_token_id_;
+};
+
+} // namespace
+
+void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
+ ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
+ FilteredCloneCallback cb(root_, security_token_id);
+ src->TraverseDepthFirst(&cb);
+ CalculateTotalTicks();
+}
+
+
void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
}
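
The filtering rule that drives FilteredCloneCallback above is worth restating on its own: a node is kept if it has no token, has the requested token, or inherits its token and its parent would be kept; anything else has its ticks folded into the parent. A standalone version of that predicate (the constants mirror CodeEntry's, everything else is illustrative):

// token_filter_sketch.cc -- the acceptance predicate used when cloning a
// profile tree for a single security token.
#include <cassert>

const int kNoSecurityToken = -1;
const int kInheritsSecurityToken = -2;

bool IsTokenAcceptable(int token, int parent_token, int requested) {
  if (token == kNoSecurityToken || token == requested) return true;
  if (token == kInheritsSecurityToken) {
    // An inheriting node can only follow a node that resolved its own token.
    return parent_token == kNoSecurityToken || parent_token == requested;
  }
  return false;   // foreign token: attribute this node's ticks to its parent
}

int main() {
  assert(IsTokenAcceptable(kNoSecurityToken, 7, 3));
  assert(IsTokenAcceptable(3, 7, 3));
  assert(IsTokenAcceptable(kInheritsSecurityToken, 3, 3));
  assert(!IsTokenAcceptable(kInheritsSecurityToken, 7, 3));
  assert(!IsTokenAcceptable(7, 3, 3));
}
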
@@ -170,12 +301,13 @@ class Position {
// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
-void ProfileTree::TraverseDepthFirstPostOrder(Callback* callback) {
+void ProfileTree::TraverseDepthFirst(Callback* callback) {
List<Position> stack(10);
stack.Add(Position(root_));
- do {
+ while (stack.length() > 0) {
Position& current = stack.last();
if (current.has_current_child()) {
+ callback->BeforeTraversingChild(current.node, current.current_child());
stack.Add(Position(current.current_child()));
} else {
callback->AfterAllChildrenTraversed(current.node);
@@ -183,11 +315,11 @@ void ProfileTree::TraverseDepthFirstPostOrder(Callback* callback) {
Position& parent = stack[stack.length() - 2];
callback->AfterChildTraversed(parent.node, current.node);
parent.next_child();
- // Remove child from the stack.
- stack.RemoveLast();
}
+ // Remove child from the stack.
+ stack.RemoveLast();
}
- } while (stack.length() > 1 || stack.last().has_current_child());
+ }
}
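
The traversal above is renamed from TraverseDepthFirstPostOrder to TraverseDepthFirst because it now also notifies the callback before descending into each child, which is what lets FilteredClone build its copy on the way down. A compilable sketch of the same traversal shape over a plain tree (simplified types, not part of this patch):

// dfs_sketch.cc -- non-recursive depth-first traversal with Before/After
// hooks, mirroring ProfileTree::TraverseDepthFirst.
#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> children;
};

struct Position {
  explicit Position(Node* n) : node(n), child(0) {}
  bool has_current_child() const { return child < node->children.size(); }
  Node* current_child() const { return node->children[child]; }
  Node* node;
  size_t child;
};

template <typename Callback>
void TraverseDepthFirst(Node* root, Callback* cb) {
  std::vector<Position> stack;
  stack.push_back(Position(root));
  while (!stack.empty()) {
    Position& current = stack.back();
    if (current.has_current_child()) {
      cb->BeforeTraversingChild(current.node, current.current_child());
      stack.push_back(Position(current.current_child()));
    } else {
      cb->AfterAllChildrenTraversed(current.node);
      if (stack.size() > 1) {
        Position& parent = stack[stack.size() - 2];
        cb->AfterChildTraversed(parent.node, current.node);
        ++parent.child;       // advance past the finished child
      }
      stack.pop_back();       // remove the finished node from the stack
    }
  }
}

struct PrintCallback {
  void BeforeTraversingChild(Node*, Node* child) { std::printf("enter %d\n", child->id); }
  void AfterAllChildrenTraversed(Node* node) { std::printf("done %d\n", node->id); }
  void AfterChildTraversed(Node*, Node*) {}
};

int main() {
  Node c1{1, {}}, c2{2, {}}, root{0, {&c1, &c2}};
  PrintCallback cb;
  TraverseDepthFirst(&root, &cb);
}
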
@@ -195,6 +327,8 @@ namespace {
class CalculateTotalTicksCallback {
public:
+ void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
+
void AfterAllChildrenTraversed(ProfileNode* node) {
node->IncreaseTotalTicks(node->self_ticks());
}
@@ -209,7 +343,7 @@ class CalculateTotalTicksCallback {
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
- TraverseDepthFirstPostOrder(&cb);
+ TraverseDepthFirst(&cb);
}
@@ -238,6 +372,15 @@ void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
}
+CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
+ ASSERT(security_token_id != CodeEntry::kNoSecurityToken);
+ CpuProfile* clone = new CpuProfile(title_, uid_);
+ clone->top_down_.FilteredClone(&top_down_, security_token_id);
+ clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
+ return clone;
+}
+
+
void CpuProfile::ShortPrint() {
OS::Print("top down ");
top_down_.ShortPrint();
@@ -259,12 +402,13 @@ const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
CodeMap::CodeEntryInfo(NULL, 0);
-void CodeMap::AddAlias(Address alias, Address addr) {
+void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) {
CodeTree::Locator locator;
- if (tree_.Find(addr, &locator)) {
- const CodeEntryInfo& entry_info = locator.value();
- tree_.Insert(alias, &locator);
- locator.set_value(entry_info);
+ if (tree_.Find(code_start, &locator)) {
+ const CodeEntryInfo& code_info = locator.value();
+ entry->CopyData(*code_info.entry);
+ tree_.Insert(start, &locator);
+ locator.set_value(CodeEntryInfo(entry, code_info.size));
}
}
@@ -295,8 +439,10 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection()
: function_and_resource_names_(StringsMatch),
- profiles_uids_(CpuProfilesMatch),
+ profiles_uids_(UidsMatch),
current_profiles_semaphore_(OS::CreateSemaphore(1)) {
+ // Create list of unabridged profiles.
+ profiles_by_token_.Add(new List<CpuProfile*>());
}
@@ -313,11 +459,15 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
delete *profile_ptr;
}
+static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
+ (*list_ptr)->Iterate(DeleteCpuProfile);
+ delete *list_ptr;
+}
CpuProfilesCollection::~CpuProfilesCollection() {
delete current_profiles_semaphore_;
current_profiles_.Iterate(DeleteCpuProfile);
- profiles_.Iterate(DeleteCpuProfile);
+ profiles_by_token_.Iterate(DeleteProfilesList);
code_entries_.Iterate(DeleteCodeEntry);
args_count_names_.Iterate(DeleteArgsCountName);
for (HashMap::Entry* p = function_and_resource_names_.Start();
@@ -349,7 +499,8 @@ bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
}
-CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
+CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
+ const char* title,
double actual_sampling_rate) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
@@ -365,29 +516,89 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
if (profile != NULL) {
profile->CalculateTotalTicks();
profile->SetActualSamplingRate(actual_sampling_rate);
- profiles_.Add(profile);
+ List<CpuProfile*>* unabridged_list =
+ profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+ unabridged_list->Add(profile);
HashMap::Entry* entry =
profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
static_cast<uint32_t>(profile->uid()),
true);
ASSERT(entry->value == NULL);
- entry->value = profile;
+ entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
+ return GetProfile(security_token_id, profile->uid());
}
- return profile;
+ return NULL;
}
-CpuProfile* CpuProfilesCollection::StopProfiling(String* title,
+CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
+ String* title,
double actual_sampling_rate) {
- return StopProfiling(GetName(title), actual_sampling_rate);
+ return StopProfiling(security_token_id, GetName(title), actual_sampling_rate);
}
-CpuProfile* CpuProfilesCollection::GetProfile(unsigned uid) {
+CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
+ unsigned uid) {
HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
static_cast<uint32_t>(uid),
false);
- return entry != NULL ? reinterpret_cast<CpuProfile*>(entry->value) : NULL;
+ int index;
+ if (entry != NULL) {
+ index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ } else {
+ return NULL;
+ }
+ List<CpuProfile*>* unabridged_list =
+ profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+ if (security_token_id == CodeEntry::kNoSecurityToken) {
+ return unabridged_list->at(index);
+ }
+ List<CpuProfile*>* list = GetProfilesList(security_token_id);
+ if (list->at(index) == NULL) {
+ list->at(index) =
+ unabridged_list->at(index)->FilteredClone(security_token_id);
+ }
+ return list->at(index);
+}
+
+
+int CpuProfilesCollection::TokenToIndex(int security_token_id) {
+ ASSERT(CodeEntry::kNoSecurityToken == -1);
+ return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
+}
+
+
+List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
+ int security_token_id) {
+ const int index = TokenToIndex(security_token_id);
+ profiles_by_token_.AddBlock(NULL, profiles_by_token_.length() - index + 1);
+ List<CpuProfile*>* unabridged_list =
+ profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+ const int current_count = unabridged_list->length();
+ if (profiles_by_token_[index] == NULL) {
+ profiles_by_token_[index] = new List<CpuProfile*>(current_count);
+ }
+ List<CpuProfile*>* list = profiles_by_token_[index];
+ list->AddBlock(NULL, current_count - list->length());
+ return list;
+}
+
+
+List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
+ List<CpuProfile*>* unabridged_list =
+ profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+ if (security_token_id == CodeEntry::kNoSecurityToken) {
+ return unabridged_list;
+ }
+ List<CpuProfile*>* list = GetProfilesList(security_token_id);
+ const int current_count = unabridged_list->length();
+ for (int i = 0; i < current_count; ++i) {
+ if (list->at(i) == NULL) {
+ list->at(i) = unabridged_list->at(i)->FilteredClone(security_token_id);
+ }
+ }
+ return list;
}
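
Profiles are now stored per security token: slot 0 of profiles_by_token_ holds the unabridged profiles, and filtered clones for a concrete token are created lazily by GetProfile and Profiles. The index mapping behind that layout is small enough to spell out (kNoSecurityToken is -1, as the ASSERT above relies on):

// token_index_sketch.cc -- the token-id to list-index mapping used by
// CpuProfilesCollection; nothing else from the class is modeled here.
#include <cassert>

const int kNoSecurityToken = -1;

int TokenToIndex(int security_token_id) {
  return security_token_id + 1;   // -1 -> 0 (unabridged list), 0 -> 1, 1 -> 2, ...
}

int main() {
  assert(TokenToIndex(kNoSecurityToken) == 0);
  assert(TokenToIndex(0) == 1);
  assert(TokenToIndex(5) == 6);
}
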
@@ -399,7 +610,8 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
CodeEntry::kEmptyNamePrefix,
GetFunctionName(name),
GetName(resource_name),
- line_number);
+ line_number,
+ CodeEntry::kNoSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -411,7 +623,8 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
CodeEntry::kEmptyNamePrefix,
GetFunctionName(name),
"",
- v8::CpuProfileNode::kNoLineNumberInfo);
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ CodeEntry::kNoSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -424,7 +637,8 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
name_prefix,
GetName(name),
"",
- v8::CpuProfileNode::kNoLineNumberInfo);
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ CodeEntry::kInheritsSecurityToken);
code_entries_.Add(entry);
return entry;
}
@@ -436,7 +650,15 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
"args_count: ",
GetName(args_count),
"",
- v8::CpuProfileNode::kNoLineNumberInfo);
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ CodeEntry::kInheritsSecurityToken);
+ code_entries_.Add(entry);
+ return entry;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) {
+ CodeEntry* entry = new CodeEntry(security_token_id);
code_entries_.Add(entry);
return entry;
}
@@ -547,8 +769,13 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
*entry = NULL;
} else {
CodeEntry* pc_entry = *entries.start();
- if (pc_entry == NULL || pc_entry->is_js_function())
+ if (pc_entry == NULL) {
*entry = NULL;
+ } else if (pc_entry->is_js_function()) {
+ // Use function entry in favor of pc entry, as function
+ // entry has security token.
+ *entries.start() = NULL;
+ }
}
entry++;
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index bd5b0cd24f..7830787024 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -35,14 +35,34 @@
namespace v8 {
namespace internal {
+class TokenEnumerator {
+ public:
+ TokenEnumerator();
+ ~TokenEnumerator();
+ int GetTokenId(Object* token);
+
+ private:
+ static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
+ void* parameter);
+ void TokenRemoved(Object** token_location);
+
+ List<Object**> token_locations_;
+ List<bool> token_removed_;
+
+ friend class TokenEnumeratorTester;
+};
+
+
class CodeEntry {
public:
+ explicit INLINE(CodeEntry(int security_token_id));
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
const char* name,
const char* resource_name,
- int line_number));
+ int line_number,
+ int security_token_id));
INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
INLINE(const char* name_prefix() const) { return name_prefix_; }
@@ -51,18 +71,24 @@ class CodeEntry {
INLINE(const char* resource_name() const) { return resource_name_; }
INLINE(int line_number() const) { return line_number_; }
INLINE(unsigned call_uid() const) { return call_uid_; }
+ INLINE(int security_token_id() const) { return security_token_id_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
+ void CopyData(const CodeEntry& source);
+
static const char* kEmptyNamePrefix;
+ static const int kNoSecurityToken = -1;
+ static const int kInheritsSecurityToken = -2;
private:
- const unsigned call_uid_;
+ unsigned call_uid_;
Logger::LogEventsAndTags tag_;
const char* name_prefix_;
const char* name_;
const char* resource_name_;
int line_number_;
+ int security_token_id_;
static unsigned next_call_uid_;
@@ -79,6 +105,7 @@ class ProfileNode {
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
+ INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
INLINE(CodeEntry* entry() const) { return entry_; }
@@ -119,6 +146,7 @@ class ProfileTree {
void AddPathFromEnd(const Vector<CodeEntry*>& path);
void AddPathFromStart(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
+ void FilteredClone(ProfileTree* src, int security_token_id);
double TicksToMillis(unsigned ticks) const {
return ticks * ms_to_ticks_scale_;
@@ -133,7 +161,7 @@ class ProfileTree {
private:
template <typename Callback>
- void TraverseDepthFirstPostOrder(Callback* callback);
+ void TraverseDepthFirst(Callback* callback);
CodeEntry root_entry_;
ProfileNode* root_;
@@ -152,6 +180,7 @@ class CpuProfile {
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
void SetActualSamplingRate(double actual_sampling_rate);
+ CpuProfile* FilteredClone(int security_token_id);
INLINE(const char* title() const) { return title_; }
INLINE(unsigned uid() const) { return uid_; }
@@ -179,7 +208,7 @@ class CodeMap {
INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
INLINE(void MoveCode(Address from, Address to));
INLINE(void DeleteCode(Address addr));
- void AddAlias(Address alias, Address addr);
+ void AddAlias(Address start, CodeEntry* entry, Address code_start);
CodeEntry* FindEntry(Address addr);
void Print();
@@ -221,10 +250,14 @@ class CpuProfilesCollection {
bool StartProfiling(const char* title, unsigned uid);
bool StartProfiling(String* title, unsigned uid);
- CpuProfile* StopProfiling(const char* title, double actual_sampling_rate);
- CpuProfile* StopProfiling(String* title, double actual_sampling_rate);
- INLINE(List<CpuProfile*>* profiles()) { return &profiles_; }
- CpuProfile* GetProfile(unsigned uid);
+ CpuProfile* StopProfiling(int security_token_id,
+ const char* title,
+ double actual_sampling_rate);
+ CpuProfile* StopProfiling(int security_token_id,
+ String* title,
+ double actual_sampling_rate);
+ List<CpuProfile*>* Profiles(int security_token_id);
+ CpuProfile* GetProfile(int security_token_id, unsigned uid);
inline bool is_last_profile();
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
@@ -233,6 +266,7 @@ class CpuProfilesCollection {
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix, String* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
+ CodeEntry* NewCodeEntry(int security_token_id);
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
@@ -242,13 +276,15 @@ class CpuProfilesCollection {
INLINE(const char* GetFunctionName(const char* name));
const char* GetName(String* name);
const char* GetName(int args_count);
+ List<CpuProfile*>* GetProfilesList(int security_token_id);
+ int TokenToIndex(int security_token_id);
INLINE(static bool StringsMatch(void* key1, void* key2)) {
return strcmp(reinterpret_cast<char*>(key1),
reinterpret_cast<char*>(key2)) == 0;
}
- INLINE(static bool CpuProfilesMatch(void* key1, void* key2)) {
+ INLINE(static bool UidsMatch(void* key1, void* key2)) {
return key1 == key2;
}
@@ -257,8 +293,8 @@ class CpuProfilesCollection {
// args_count -> char*
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
- List<CpuProfile*> profiles_;
- // uid -> CpuProfile*
+ List<List<CpuProfile*>* > profiles_by_token_;
+ // uid -> index
HashMap profiles_uids_;
// Accessed by VM thread and profile generator thread.
@@ -332,6 +368,10 @@ class ProfileGenerator {
return profiles_->NewCodeEntry(tag, args_count);
}
+ INLINE(CodeEntry* NewCodeEntry(int security_token_id)) {
+ return profiles_->NewCodeEntry(security_token_id);
+ }
+
void RecordTickSample(const TickSample& sample);
INLINE(CodeMap* code_map()) { return &code_map_; }
diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc
index b9989a5dde..31d0a49fa5 100644
--- a/deps/v8/src/register-allocator.cc
+++ b/deps/v8/src/register-allocator.cc
@@ -84,15 +84,16 @@ Result RegisterAllocator::Allocate() {
Result RegisterAllocator::Allocate(Register target) {
// If the target is not referenced, it can simply be allocated.
- if (!is_used(target)) {
+ if (!is_used(RegisterAllocator::ToNumber(target))) {
return Result(target);
}
// If the target is only referenced in the frame, it can be spilled and
// then allocated.
ASSERT(cgen_->has_valid_frame());
- if (cgen_->frame()->is_used(target) && count(target) == 1) {
+ if (cgen_->frame()->is_used(RegisterAllocator::ToNumber(target)) &&
+ count(target) == 1) {
cgen_->frame()->Spill(target);
- ASSERT(!is_used(target));
+ ASSERT(!is_used(RegisterAllocator::ToNumber(target)));
return Result(target);
}
// Otherwise (if it's referenced outside the frame) we cannot allocate it.
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index be93c4febe..8e3883f756 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -80,10 +80,7 @@ function EQUALS(y) {
} else {
// x is not a number, boolean, null or undefined.
if (y == null) return 1; // not equal
- if (IS_OBJECT(y)) {
- return %_ObjectEquals(x, y) ? 0 : 1;
- }
- if (IS_FUNCTION(y)) {
+ if (IS_SPEC_OBJECT_OR_NULL(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
@@ -344,7 +341,7 @@ function DELETE(key) {
// ECMA-262, section 11.8.7, page 54.
function IN(x) {
- if (x == null || (!IS_OBJECT(x) && !IS_FUNCTION(x))) {
+ if (x == null || !IS_SPEC_OBJECT_OR_NULL(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
@@ -362,13 +359,13 @@ function INSTANCE_OF(F) {
}
// If V is not an object, return false.
- if (IS_NULL(V) || (!IS_OBJECT(V) && !IS_FUNCTION(V))) {
+ if (IS_NULL(V) || !IS_SPEC_OBJECT_OR_NULL(V)) {
return 1;
}
// Get the prototype of F; if it is not an object, throw an error.
var O = F.prototype;
- if (IS_NULL(O) || (!IS_OBJECT(O) && !IS_FUNCTION(O))) {
+ if (IS_NULL(O) || !IS_SPEC_OBJECT_OR_NULL(O)) {
throw %MakeTypeError('instanceof_nonobject_proto', [O]);
}
@@ -482,7 +479,7 @@ function ToPrimitive(x, hint) {
// Fast case check.
if (IS_STRING(x)) return x;
// Normal behavior.
- if (!IS_OBJECT(x) && !IS_FUNCTION(x)) return x;
+ if (!IS_SPEC_OBJECT_OR_NULL(x)) return x;
if (x == null) return x; // check for null, undefined
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
@@ -587,7 +584,7 @@ function SameValue(x, y) {
// Returns if the given x is a primitive value - not an object or a
// function.
function IsPrimitive(x) {
- if (!IS_OBJECT(x) && !IS_FUNCTION(x)) {
+ if (!IS_SPEC_OBJECT_OR_NULL(x)) {
return true;
} else {
// Even though the type of null is "object", null is still
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index dcaa101155..a3a2ba9bcb 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -660,27 +660,164 @@ void Deserializer::ReadObject(int space_number,
}
-#define ONE_CASE_PER_SPACE(base_tag) \
- case (base_tag) + NEW_SPACE: /* NOLINT */ \
- case (base_tag) + OLD_POINTER_SPACE: /* NOLINT */ \
- case (base_tag) + OLD_DATA_SPACE: /* NOLINT */ \
- case (base_tag) + CODE_SPACE: /* NOLINT */ \
- case (base_tag) + MAP_SPACE: /* NOLINT */ \
- case (base_tag) + CELL_SPACE: /* NOLINT */ \
- case (base_tag) + kLargeData: /* NOLINT */ \
- case (base_tag) + kLargeCode: /* NOLINT */ \
- case (base_tag) + kLargeFixedArray: /* NOLINT */
+// This macro is always used with a constant argument so it should all fold
+// away to almost nothing in the generated code. It might be nicer to do this
+// with the ternary operator but there are type issues with that.
+#define ASSIGN_DEST_SPACE(space_number) \
+ Space* dest_space; \
+ if (space_number == NEW_SPACE) { \
+ dest_space = Heap::new_space(); \
+ } else if (space_number == OLD_POINTER_SPACE) { \
+ dest_space = Heap::old_pointer_space(); \
+ } else if (space_number == OLD_DATA_SPACE) { \
+ dest_space = Heap::old_data_space(); \
+ } else if (space_number == CODE_SPACE) { \
+ dest_space = Heap::code_space(); \
+ } else if (space_number == MAP_SPACE) { \
+ dest_space = Heap::map_space(); \
+ } else if (space_number == CELL_SPACE) { \
+ dest_space = Heap::cell_space(); \
+ } else { \
+ ASSERT(space_number >= LO_SPACE); \
+ dest_space = Heap::lo_space(); \
+ }
+
+
+static const int kUnknownOffsetFromStart = -1;
void Deserializer::ReadChunk(Object** current,
Object** limit,
- int space,
+ int source_space,
Address address) {
while (current < limit) {
int data = source_->Get();
switch (data) {
+#define CASE_STATEMENT(where, how, within, space_number) \
+ case where + how + within + space_number: \
+ ASSERT((where & ~kPointedToMask) == 0); \
+ ASSERT((how & ~kHowToCodeMask) == 0); \
+ ASSERT((within & ~kWhereToPointMask) == 0); \
+ ASSERT((space_number & ~kSpaceMask) == 0);
+
+#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
+ { \
+ bool emit_write_barrier = false; \
+ bool current_was_incremented = false; \
+ int space_number = space_number_if_any == kAnyOldSpace ? \
+ (data & kSpaceMask) : space_number_if_any; \
+ if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
+ ASSIGN_DEST_SPACE(space_number) \
+ ReadObject(space_number, dest_space, current); \
+ emit_write_barrier = \
+ (space_number == NEW_SPACE && source_space != NEW_SPACE); \
+ } else { \
+ Object* new_object = NULL; /* May not be a real Object pointer. */ \
+ if (where == kNewObject) { \
+ ASSIGN_DEST_SPACE(space_number) \
+ ReadObject(space_number, dest_space, &new_object); \
+ } else if (where == kRootArray) { \
+ int root_id = source_->GetInt(); \
+ new_object = Heap::roots_address()[root_id]; \
+ } else if (where == kPartialSnapshotCache) { \
+ int cache_index = source_->GetInt(); \
+ new_object = partial_snapshot_cache_[cache_index]; \
+ } else if (where == kExternalReference) { \
+ int reference_id = source_->GetInt(); \
+ Address address = \
+ external_reference_decoder_->Decode(reference_id); \
+ new_object = reinterpret_cast<Object*>(address); \
+ } else if (where == kBackref) { \
+ emit_write_barrier = \
+ (space_number == NEW_SPACE && source_space != NEW_SPACE); \
+ new_object = GetAddressFromEnd(data & kSpaceMask); \
+ } else { \
+ ASSERT(where == kFromStart); \
+ if (offset_from_start == kUnknownOffsetFromStart) { \
+ emit_write_barrier = \
+ (space_number == NEW_SPACE && source_space != NEW_SPACE); \
+ new_object = GetAddressFromStart(data & kSpaceMask); \
+ } else { \
+ Address object_address = pages_[space_number][0] + \
+ (offset_from_start << kObjectAlignmentBits); \
+ new_object = HeapObject::FromAddress(object_address); \
+ } \
+ } \
+ if (within == kFirstInstruction) { \
+ Code* new_code_object = reinterpret_cast<Code*>(new_object); \
+ new_object = reinterpret_cast<Object*>( \
+ new_code_object->instruction_start()); \
+ } \
+ if (how == kFromCode) { \
+ Address location_of_branch_data = \
+ reinterpret_cast<Address>(current); \
+ Assembler::set_target_at(location_of_branch_data, \
+ reinterpret_cast<Address>(new_object)); \
+ if (within == kFirstInstruction) { \
+ location_of_branch_data += Assembler::kCallTargetSize; \
+ current = reinterpret_cast<Object**>(location_of_branch_data); \
+ current_was_incremented = true; \
+ } \
+ } else { \
+ *current = new_object; \
+ } \
+ } \
+ if (emit_write_barrier) { \
+ Heap::RecordWrite(address, static_cast<int>( \
+ reinterpret_cast<Address>(current) - address)); \
+ } \
+ if (!current_was_incremented) { \
+ current++; /* Increment current if it wasn't done above. */ \
+ } \
+ break; \
+ } \
+
+// This generates a case and a body for each space. The large object spaces are
+// very rare in snapshots so they are grouped in one body.
+#define ONE_PER_SPACE(where, how, within) \
+ CASE_STATEMENT(where, how, within, NEW_SPACE) \
+ CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
+ CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
+ CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, CELL_SPACE) \
+ CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, kLargeData) \
+ CASE_STATEMENT(where, how, within, kLargeCode) \
+ CASE_STATEMENT(where, how, within, kLargeFixedArray) \
+ CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
+
+// This generates a case and a body for the new space (which has to do extra
+// write barrier handling) and handles the other spaces with 8 fall-through
+// cases and one body.
+#define ALL_SPACES(where, how, within) \
+ CASE_STATEMENT(where, how, within, NEW_SPACE) \
+ CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
+ CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_STATEMENT(where, how, within, CELL_SPACE) \
+ CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ CASE_STATEMENT(where, how, within, kLargeData) \
+ CASE_STATEMENT(where, how, within, kLargeCode) \
+ CASE_STATEMENT(where, how, within, kLargeFixedArray) \
+ CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
+
+#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
+ space_number, \
+ offset_from_start) \
+ CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number) \
+ CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
+
+ // We generate 15 cases and bodies that process special tags that combine
+ // the raw data tag and the length into one byte.
#define RAW_CASE(index, size) \
- case RAW_DATA_SERIALIZATION + index: { \
+ case kRawData + index: { \
byte* raw_data_out = reinterpret_cast<byte*>(current); \
source_->CopyRaw(raw_data_out, size); \
current = reinterpret_cast<Object**>(raw_data_out + size); \
@@ -688,144 +825,77 @@ void Deserializer::ReadChunk(Object** current,
}
COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
- case RAW_DATA_SERIALIZATION: {
+
+ // Deserialize a chunk of raw data that doesn't have one of the popular
+ // lengths.
+ case kRawData: {
int size = source_->GetInt();
byte* raw_data_out = reinterpret_cast<byte*>(current);
source_->CopyRaw(raw_data_out, size);
current = reinterpret_cast<Object**>(raw_data_out + size);
break;
}
- case OBJECT_SERIALIZATION + NEW_SPACE: {
- ReadObject(NEW_SPACE, Heap::new_space(), current);
- if (space != NEW_SPACE) {
- Heap::RecordWrite(address, static_cast<int>(
- reinterpret_cast<Address>(current) - address));
- }
- current++;
- break;
- }
- case OBJECT_SERIALIZATION + OLD_DATA_SPACE:
- ReadObject(OLD_DATA_SPACE, Heap::old_data_space(), current++);
- break;
- case OBJECT_SERIALIZATION + OLD_POINTER_SPACE:
- ReadObject(OLD_POINTER_SPACE, Heap::old_pointer_space(), current++);
- break;
- case OBJECT_SERIALIZATION + MAP_SPACE:
- ReadObject(MAP_SPACE, Heap::map_space(), current++);
- break;
- case OBJECT_SERIALIZATION + CODE_SPACE:
- ReadObject(CODE_SPACE, Heap::code_space(), current++);
- break;
- case OBJECT_SERIALIZATION + CELL_SPACE:
- ReadObject(CELL_SPACE, Heap::cell_space(), current++);
- break;
- case OBJECT_SERIALIZATION + kLargeData:
- ReadObject(kLargeData, Heap::lo_space(), current++);
- break;
- case OBJECT_SERIALIZATION + kLargeCode:
- ReadObject(kLargeCode, Heap::lo_space(), current++);
- break;
- case OBJECT_SERIALIZATION + kLargeFixedArray:
- ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
- break;
- case CODE_OBJECT_SERIALIZATION + kLargeCode: {
- Object* new_code_object = NULL;
- ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
- Code* code_object = reinterpret_cast<Code*>(new_code_object);
- // Setting a branch/call to another code object from code.
- Address location_of_branch_data = reinterpret_cast<Address>(current);
- Assembler::set_target_at(location_of_branch_data,
- code_object->instruction_start());
- location_of_branch_data += Assembler::kCallTargetSize;
- current = reinterpret_cast<Object**>(location_of_branch_data);
- break;
- }
- case CODE_OBJECT_SERIALIZATION + CODE_SPACE: {
- Object* new_code_object = NULL;
- ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
- Code* code_object = reinterpret_cast<Code*>(new_code_object);
- // Setting a branch/call to another code object from code.
- Address location_of_branch_data = reinterpret_cast<Address>(current);
- Assembler::set_target_at(location_of_branch_data,
- code_object->instruction_start());
- location_of_branch_data += Assembler::kCallTargetSize;
- current = reinterpret_cast<Object**>(location_of_branch_data);
- break;
- }
- ONE_CASE_PER_SPACE(BACKREF_SERIALIZATION) {
- // Write a backreference to an object we unpacked earlier.
- int backref_space = (data & kSpaceMask);
- if (backref_space == NEW_SPACE && space != NEW_SPACE) {
- Heap::RecordWrite(address, static_cast<int>(
- reinterpret_cast<Address>(current) - address));
- }
- *current++ = GetAddressFromEnd(backref_space);
- break;
- }
- ONE_CASE_PER_SPACE(REFERENCE_SERIALIZATION) {
- // Write a reference to an object we unpacked earlier.
- int reference_space = (data & kSpaceMask);
- if (reference_space == NEW_SPACE && space != NEW_SPACE) {
- Heap::RecordWrite(address, static_cast<int>(
- reinterpret_cast<Address>(current) - address));
- }
- *current++ = GetAddressFromStart(reference_space);
- break;
- }
-#define COMMON_REFS_CASE(index, reference_space, address) \
- case REFERENCE_SERIALIZATION + index: { \
- ASSERT(SpaceIsPaged(reference_space)); \
- Address object_address = \
- pages_[reference_space][0] + (address << kObjectAlignmentBits); \
- *current++ = HeapObject::FromAddress(object_address); \
- break; \
- }
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
- ONE_CASE_PER_SPACE(CODE_BACKREF_SERIALIZATION) {
- int backref_space = (data & kSpaceMask);
- // Can't use Code::cast because heap is not set up yet and assertions
- // will fail.
- Code* code_object =
- reinterpret_cast<Code*>(GetAddressFromEnd(backref_space));
- // Setting a branch/call to previously decoded code object from code.
- Address location_of_branch_data = reinterpret_cast<Address>(current);
- Assembler::set_target_at(location_of_branch_data,
- code_object->instruction_start());
- location_of_branch_data += Assembler::kCallTargetSize;
- current = reinterpret_cast<Object**>(location_of_branch_data);
- break;
- }
- ONE_CASE_PER_SPACE(CODE_REFERENCE_SERIALIZATION) {
- int backref_space = (data & kSpaceMask);
- // Can't use Code::cast because heap is not set up yet and assertions
- // will fail.
- Code* code_object =
- reinterpret_cast<Code*>(GetAddressFromStart(backref_space));
- // Setting a branch/call to previously decoded code object from code.
- Address location_of_branch_data = reinterpret_cast<Address>(current);
- Assembler::set_target_at(location_of_branch_data,
- code_object->instruction_start());
- location_of_branch_data += Assembler::kCallTargetSize;
- current = reinterpret_cast<Object**>(location_of_branch_data);
- break;
- }
- case EXTERNAL_REFERENCE_SERIALIZATION: {
- int reference_id = source_->GetInt();
- Address address = external_reference_decoder_->Decode(reference_id);
- *current++ = reinterpret_cast<Object*>(address);
- break;
- }
- case EXTERNAL_BRANCH_TARGET_SERIALIZATION: {
- int reference_id = source_->GetInt();
- Address address = external_reference_decoder_->Decode(reference_id);
- Address location_of_branch_data = reinterpret_cast<Address>(current);
- Assembler::set_external_target_at(location_of_branch_data, address);
- location_of_branch_data += Assembler::kExternalTargetSize;
- current = reinterpret_cast<Object**>(location_of_branch_data);
- break;
- }
- case START_NEW_PAGE_SERIALIZATION: {
+
+ // Deserialize a new object and write a pointer to it to the current
+ // object.
+ ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
+ // Deserialize a new code object and write a pointer to its first
+ // instruction to the current code object.
+ ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
+ // Find a recently deserialized object using its offset from the current
+ // allocation point and write a pointer to it to the current object.
+ ALL_SPACES(kBackref, kPlain, kStartOfObject)
+ // Find a recently deserialized code object using its offset from the
+ // current allocation point and write a pointer to its first instruction
+ // to the current code object.
+ ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
+ // Find an already deserialized object using its offset from the start
+ // and write a pointer to it to the current object.
+ ALL_SPACES(kFromStart, kPlain, kStartOfObject)
+ // Find an already deserialized code object using its offset from the
+ // start and write a pointer to its first instruction to the current code
+ // object.
+ ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
+ // Find an already deserialized object at one of the predetermined popular
+ // offsets from the start and write a pointer to it in the current object.
+ COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
+ // Find an object in the roots array and write a pointer to it to the
+ // current object.
+ CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
+ CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
+ // Find an object in the partial snapshots cache and write a pointer to it
+ // to the current object.
+ CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
+ CASE_BODY(kPartialSnapshotCache,
+ kPlain,
+ kStartOfObject,
+ 0,
+ kUnknownOffsetFromStart)
+ // Find an external reference and write a pointer to it to the current
+ // object.
+ CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
+ CASE_BODY(kExternalReference,
+ kPlain,
+ kStartOfObject,
+ 0,
+ kUnknownOffsetFromStart)
+ // Find an external reference and write a pointer to it in the current
+ // code object.
+ CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
+ CASE_BODY(kExternalReference,
+ kFromCode,
+ kStartOfObject,
+ 0,
+ kUnknownOffsetFromStart)
+
+#undef CASE_STATEMENT
+#undef CASE_BODY
+#undef ONE_PER_SPACE
+#undef ALL_SPACES
+#undef EMIT_COMMON_REFERENCE_PATTERNS
+#undef ASSIGN_DEST_SPACE
+
+ case kNewPage: {
int space = source_->Get();
pages_[space].Add(last_object_address_);
if (space == CODE_SPACE) {
@@ -833,7 +903,8 @@ void Deserializer::ReadChunk(Object** current,
}
break;
}
- case NATIVES_STRING_RESOURCE: {
+
+ case kNativesStringResource: {
int index = source_->Get();
Vector<const char> source_vector = Natives::GetScriptSource(index);
NativesExternalStringResource* resource =
@@ -841,21 +912,13 @@ void Deserializer::ReadChunk(Object** current,
*current++ = reinterpret_cast<Object*>(resource);
break;
}
- case ROOT_SERIALIZATION: {
- int root_id = source_->GetInt();
- *current++ = Heap::roots_address()[root_id];
- break;
- }
- case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
- int cache_index = source_->GetInt();
- *current++ = partial_snapshot_cache_[cache_index];
- break;
- }
- case SYNCHRONIZE: {
+
+ case kSynchronize: {
// If we get here then that indicates that you have a mismatch between
// the number of GC roots when serializing and deserializing.
UNREACHABLE();
}
+
default:
UNREACHABLE();
}
@@ -880,7 +943,7 @@ void Deserializer::Synchronize(const char* tag) {
int data = source_->Get();
// If this assert fails then that indicates that you have a mismatch between
// the number of GC roots when serializing and deserializing.
- ASSERT_EQ(SYNCHRONIZE, data);
+ ASSERT_EQ(kSynchronize, data);
do {
int character = source_->Get();
if (character == 0) break;
@@ -895,7 +958,7 @@ void Deserializer::Synchronize(const char* tag) {
void Serializer::Synchronize(const char* tag) {
- sink_->Put(SYNCHRONIZE, tag);
+ sink_->Put(kSynchronize, tag);
int character;
do {
character = *tag++;
@@ -957,13 +1020,13 @@ void PartialSerializer::Serialize(Object** object) {
void Serializer::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
- sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+ sink_->Put(kRawData, "RawData");
sink_->PutInt(kPointerSize, "length");
for (int i = 0; i < kPointerSize; i++) {
sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
}
} else {
- SerializeObject(*current, TAGGED_REPRESENTATION);
+ SerializeObject(*current, kPlain, kStartOfObject);
}
}
}
@@ -1033,7 +1096,8 @@ int PartialSerializer::RootIndex(HeapObject* heap_object) {
void Serializer::SerializeReferenceToPreviousObject(
int space,
int address,
- ReferenceRepresentation reference_representation) {
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
int offset = CurrentAllocationAddress(space) - address;
bool from_start = true;
if (SpaceIsPaged(space)) {
@@ -1054,43 +1118,30 @@ void Serializer::SerializeReferenceToPreviousObject(
// If we are actually dealing with real offsets (and not a numbering of
// all objects) then we should shift out the bits that are always 0.
if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- // On some architectures references between code objects are encoded
- // specially (as relative offsets). Such references have their own
- // special tags to simplify the deserializer.
- if (reference_representation == CODE_TARGET_REPRESENTATION) {
- if (from_start) {
- sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
- sink_->PutInt(address, "address");
- } else {
- sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
- sink_->PutInt(address, "address");
- }
- } else {
- // Regular absolute references.
- CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
- if (from_start) {
- // There are some common offsets that have their own specialized encoding.
-#define COMMON_REFS_CASE(tag, common_space, common_offset) \
- if (space == common_space && address == common_offset) { \
- sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+ if (from_start) {
+#define COMMON_REFS_CASE(pseudo_space, actual_space, offset) \
+ if (space == actual_space && address == offset && \
+ how_to_code == kPlain && where_to_point == kStartOfObject) { \
+ sink_->Put(kFromStart + how_to_code + where_to_point + \
+ pseudo_space, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
- sink_->PutInt(address, "address");
- }
- } else {
- sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ { /* NOLINT */
+ sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
sink_->PutInt(address, "address");
}
+ } else {
+ sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
+ sink_->PutInt(address, "address");
}
}
void StartupSerializer::SerializeObject(
Object* o,
- ReferenceRepresentation reference_representation) {
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
@@ -1099,13 +1150,15 @@ void StartupSerializer::SerializeObject(
int address = address_mapper_.MappedTo(heap_object);
SerializeReferenceToPreviousObject(space,
address,
- reference_representation);
+ how_to_code,
+ where_to_point);
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this,
heap_object,
sink_,
- reference_representation);
+ how_to_code,
+ where_to_point);
object_serializer.Serialize();
}
}
@@ -1115,7 +1168,7 @@ void StartupSerializer::SerializeWeakReferences() {
for (int i = partial_snapshot_cache_length_;
i < kPartialSnapshotCacheCapacity;
i++) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
}
Heap::IterateWeakRoots(this, VISIT_ALL);
@@ -1124,20 +1177,22 @@ void StartupSerializer::SerializeWeakReferences() {
void PartialSerializer::SerializeObject(
Object* o,
- ReferenceRepresentation reference_representation) {
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
int root_index;
if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
sink_->PutInt(root_index, "root_index");
return;
}
if (ShouldBeInThePartialSnapshotCache(heap_object)) {
int cache_index = PartialSnapshotCacheIndex(heap_object);
- sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
+ sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
+ "PartialSnapshotCache");
sink_->PutInt(cache_index, "partial_snapshot_cache_index");
return;
}
@@ -1155,13 +1210,15 @@ void PartialSerializer::SerializeObject(
int address = address_mapper_.MappedTo(heap_object);
SerializeReferenceToPreviousObject(space,
address,
- reference_representation);
+ how_to_code,
+ where_to_point);
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
heap_object,
sink_,
- reference_representation);
+ how_to_code,
+ where_to_point);
serializer.Serialize();
}
}
@@ -1171,12 +1228,8 @@ void Serializer::ObjectSerializer::Serialize() {
int space = Serializer::SpaceOfObject(object_);
int size = object_->Size();
- if (reference_representation_ == TAGGED_REPRESENTATION) {
- sink_->Put(OBJECT_SERIALIZATION + space, "ObjectSerialization");
- } else {
- CHECK_EQ(CODE_TARGET_REPRESENTATION, reference_representation_);
- sink_->Put(CODE_OBJECT_SERIALIZATION + space, "ObjectSerialization");
- }
+ sink_->Put(kNewObject + reference_representation_ + space,
+ "ObjectSerialization");
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));
@@ -1186,12 +1239,12 @@ void Serializer::ObjectSerializer::Serialize() {
int offset = serializer_->Allocate(space, size, &start_new_page);
serializer_->address_mapper()->AddMapping(object_, offset);
if (start_new_page) {
- sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
+ sink_->Put(kNewPage, "NewPage");
sink_->PutSection(space, "NewPageSpace");
}
// Serialize the map (first word of the object).
- serializer_->SerializeObject(object_->map(), TAGGED_REPRESENTATION);
+ serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
@@ -1209,7 +1262,7 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
if (current < end) OutputRawData(reinterpret_cast<Address>(current));
while (current < end && !(*current)->IsSmi()) {
- serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
+ serializer_->SerializeObject(*current, kPlain, kStartOfObject);
bytes_processed_so_far_ += kPointerSize;
current++;
}
@@ -1223,7 +1276,7 @@ void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
OutputRawData(references_start);
for (Address* current = start; current < end; current++) {
- sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "ExternalReference");
+ sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
int reference_id = serializer_->EncodeExternalReference(*current);
sink_->PutInt(reference_id, "reference id");
}
@@ -1237,7 +1290,14 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
Address target = rinfo->target_address();
uint32_t encoding = serializer_->EncodeExternalReference(target);
CHECK(target == NULL ? encoding == 0 : encoding != 0);
- sink_->Put(EXTERNAL_BRANCH_TARGET_SERIALIZATION, "ExternalReference");
+ int representation;
+ // Can't use a ternary operator because of gcc.
+ if (rinfo->IsCodedSpecially()) {
+ representation = kStartOfObject + kFromCode;
+ } else {
+ representation = kStartOfObject + kPlain;
+ }
+ sink_->Put(kExternalReference + representation, "ExternalReference");
sink_->PutInt(encoding, "reference id");
bytes_processed_so_far_ += Assembler::kExternalTargetSize;
}
@@ -1248,7 +1308,7 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
Address target_start = rinfo->target_address_address();
OutputRawData(target_start);
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
+ serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
bytes_processed_so_far_ += Assembler::kCallTargetSize;
}
@@ -1264,7 +1324,7 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
typedef v8::String::ExternalAsciiStringResource Resource;
Resource* resource = string->resource();
if (resource == *resource_pointer) {
- sink_->Put(NATIVES_STRING_RESOURCE, "NativesStringResource");
+ sink_->Put(kNativesStringResource, "NativesStringResource");
sink_->PutSection(i, "NativesStringResourceEnd");
bytes_processed_so_far_ += sizeof(resource);
return;
@@ -1288,12 +1348,12 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
Address base = object_start + bytes_processed_so_far_;
#define RAW_CASE(index, length) \
if (skipped == length) { \
- sink_->PutSection(RAW_DATA_SERIALIZATION + index, "RawDataFixed"); \
+ sink_->PutSection(kRawData + index, "RawDataFixed"); \
} else /* NOLINT */
COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
{ /* NOLINT */
- sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+ sink_->Put(kRawData, "RawData");
sink_->PutInt(skipped, "length");
}
for (int i = 0; i < skipped; i++) {
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 279bc583a3..6a318f1936 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -137,17 +137,23 @@ class SnapshotByteSource {
};
-// It is very common to have a reference to the object at word 10 in space 2,
-// the object at word 5 in space 2 and the object at word 28 in space 4. This
-// only works for objects in the first page of a space.
-#define COMMON_REFERENCE_PATTERNS(f) \
- f(kNumberOfSpaces, 2, 10) \
- f(kNumberOfSpaces + 1, 2, 5) \
- f(kNumberOfSpaces + 2, 4, 28) \
- f(kNumberOfSpaces + 3, 2, 21) \
- f(kNumberOfSpaces + 4, 2, 98) \
- f(kNumberOfSpaces + 5, 2, 67) \
- f(kNumberOfSpaces + 6, 4, 132)
+// It is very common to have a reference to objects at certain offsets in the
+// heap. These offsets have been determined experimentally. We code
+// references to such objects in a single byte that encodes the way the pointer
+// is written (only plain pointers allowed), the space number and the offset.
+// This only works for objects in the first page of a space. Don't use this for
+// things in newspace since it bypasses the write barrier.
+
+static const int k64 = (sizeof(uintptr_t) - 4) / 4;
+
+#define COMMON_REFERENCE_PATTERNS(f) \
+ f(kNumberOfSpaces, 2, (11 - k64)) \
+ f((kNumberOfSpaces + 1), 2, 0) \
+ f((kNumberOfSpaces + 2), 2, (142 - 16 * k64)) \
+ f((kNumberOfSpaces + 3), 2, (74 - 15 * k64)) \
+ f((kNumberOfSpaces + 4), 2, 5) \
+ f((kNumberOfSpaces + 5), 1, 135) \
+ f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
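
The new table is parameterized by k64 so one set of experimentally determined offsets serves both 32- and 64-bit heaps: k64 evaluates to 1 when uintptr_t is 8 bytes and to 0 when it is 4. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  // Mirrors the definition above: 1 on 64-bit targets, 0 on 32-bit targets.
  const int k64 = (sizeof(uintptr_t) - 4) / 4;
  // Example: the first common pattern above is space 2, offset (11 - k64).
  std::printf("k64 = %d, first common offset = %d\n", k64, 11 - k64);
  return 0;
}
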
#define COMMON_RAW_LENGTHS(f) \
f(1, 1) \
@@ -175,37 +181,63 @@ class SerializerDeserializer: public ObjectVisitor {
static void SetSnapshotCacheSize(int size);
protected:
- enum DataType {
- RAW_DATA_SERIALIZATION = 0,
- // And 15 common raw lengths.
- OBJECT_SERIALIZATION = 16,
- // One variant per space.
- CODE_OBJECT_SERIALIZATION = 25,
- // One per space (only code spaces in use).
- EXTERNAL_REFERENCE_SERIALIZATION = 34,
- EXTERNAL_BRANCH_TARGET_SERIALIZATION = 35,
- SYNCHRONIZE = 36,
- START_NEW_PAGE_SERIALIZATION = 37,
- NATIVES_STRING_RESOURCE = 38,
- ROOT_SERIALIZATION = 39,
- PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
- // Free: 41-47.
- BACKREF_SERIALIZATION = 48,
- // One per space, must be kSpaceMask aligned.
- // Free: 57-63.
- REFERENCE_SERIALIZATION = 64,
- // One per space and common references. Must be kSpaceMask aligned.
- CODE_BACKREF_SERIALIZATION = 80,
- // One per space, must be kSpaceMask aligned.
- // Free: 89-95.
- CODE_REFERENCE_SERIALIZATION = 96
- // One per space, must be kSpaceMask aligned.
- // Free: 105-255.
+ // Where the pointed-to object can be found:
+ enum Where {
+ kNewObject = 0, // Object is next in snapshot.
+ // 1-8 One per space.
+ kRootArray = 0x9, // Object is found in root array.
+ kPartialSnapshotCache = 0xa, // Object is in the cache.
+ kExternalReference = 0xb, // Pointer to an external reference.
+ // 0xc-0xf Free.
+ kBackref = 0x10, // Object is described relative to end.
+ // 0x11-0x18 One per space.
+ // 0x19-0x1f Common backref offsets.
+ kFromStart = 0x20, // Object is described relative to start.
+ // 0x21-0x28 One per space.
+ // 0x29-0x2f Free.
+ // 0x30-0x3f Used by misc tags below.
+ kPointedToMask = 0x3f
};
+
+ // How to code the pointer to the object.
+ enum HowToCode {
+ kPlain = 0, // Straight pointer.
+ // What this means depends on the architecture:
+ kFromCode = 0x40, // A pointer inlined in code.
+ kHowToCodeMask = 0x40
+ };
+
+ // Where to point within the object.
+ enum WhereToPoint {
+ kStartOfObject = 0,
+ kFirstInstruction = 0x80,
+ kWhereToPointMask = 0x80
+ };
+
+ // Misc.
+ // Raw data to be copied from the snapshot.
+ static const int kRawData = 0x30;
+ // Some common raw lengths: 0x31-0x3f
+ // A tag emitted at strategic points in the snapshot to delineate sections.
+ // If the deserializer does not find these at the expected moments then it
+ // is an indication that the snapshot and the VM do not fit together.
+ // Examine the build process for architecture, version or configuration
+ // mismatches.
+ static const int kSynchronize = 0x70;
+ // Used for the source code of the natives, which is in the executable, but
+ // is referred to from external strings in the snapshot.
+ static const int kNativesStringResource = 0x71;
+ static const int kNewPage = 0x72;
+ // 0x73-0x7f Free.
+ // 0xb0-0xbf Free.
+ // 0xf0-0xff Free.
+
+
static const int kLargeData = LAST_SPACE;
static const int kLargeCode = kLargeData + 1;
static const int kLargeFixedArray = kLargeCode + 1;
static const int kNumberOfSpaces = kLargeFixedArray + 1;
+ static const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
static const int kSpaceMask = 15;
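
A one-byte tag now packs three orthogonal fields plus, where applicable, a space index. The following standalone sketch shows how a tag emitted by sink_->Put(kBackref + how_to_code + where_to_point + space, ...) decomposes under the masks above; it is illustrative only, with the enum values copied here so it compiles on its own:

#include <cassert>

// Values copied from the Where/HowToCode/WhereToPoint enums above.
enum { kBackref = 0x10, kPointedToMask = 0x3f };
enum { kPlain = 0, kFromCode = 0x40, kHowToCodeMask = 0x40 };
enum { kStartOfObject = 0, kFirstInstruction = 0x80, kWhereToPointMask = 0x80 };
static const int kSpaceMask = 15;

int main() {
  int space = 2;  // An example space index.
  // This is what a back reference coded from a code object looks like.
  int tag = kBackref + kFromCode + kFirstInstruction + space;
  assert((tag & kHowToCodeMask) == kFromCode);
  assert((tag & kWhereToPointMask) == kFirstInstruction);
  assert((tag & kSpaceMask) == space);
  assert((tag & kPointedToMask) == kBackref + space);
  return 0;
}
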
@@ -396,10 +428,6 @@ class Serializer : public SerializerDeserializer {
#endif
protected:
- enum ReferenceRepresentation {
- TAGGED_REPRESENTATION, // A tagged object reference.
- CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
- };
static const int kInvalidRootIndex = -1;
virtual int RootIndex(HeapObject* heap_object) = 0;
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
@@ -409,11 +437,12 @@ class Serializer : public SerializerDeserializer {
ObjectSerializer(Serializer* serializer,
Object* o,
SnapshotByteSink* sink,
- ReferenceRepresentation representation)
+ HowToCode how_to_code,
+ WhereToPoint where_to_point)
: serializer_(serializer),
object_(HeapObject::cast(o)),
sink_(sink),
- reference_representation_(representation),
+ reference_representation_(how_to_code + where_to_point),
bytes_processed_so_far_(0) { }
void Serialize();
void VisitPointers(Object** start, Object** end);
@@ -435,16 +464,18 @@ class Serializer : public SerializerDeserializer {
Serializer* serializer_;
HeapObject* object_;
SnapshotByteSink* sink_;
- ReferenceRepresentation reference_representation_;
+ int reference_representation_;
int bytes_processed_so_far_;
};
virtual void SerializeObject(Object* o,
- ReferenceRepresentation representation) = 0;
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) = 0;
void SerializeReferenceToPreviousObject(
int space,
int address,
- ReferenceRepresentation reference_representation);
+ HowToCode how_to_code,
+ WhereToPoint where_to_point);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
@@ -492,7 +523,8 @@ class PartialSerializer : public Serializer {
// Serialize the objects reachable from a single object pointer.
virtual void Serialize(Object** o);
virtual void SerializeObject(Object* o,
- ReferenceRepresentation representation);
+ HowToCode how_to_code,
+ WhereToPoint where_to_point);
protected:
virtual int RootIndex(HeapObject* o);
@@ -528,7 +560,8 @@ class StartupSerializer : public Serializer {
// 3) Weak references (eg the symbol table).
virtual void SerializeStrongReferences();
virtual void SerializeObject(Object* o,
- ReferenceRepresentation representation);
+ HowToCode how_to_code,
+ WhereToPoint where_to_point);
void SerializeWeakReferences();
void Serialize() {
SerializeStrongReferences();
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 9433249188..59a501f9ee 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -241,7 +241,13 @@ function StringReplace(search, replace) {
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
if (IS_FUNCTION(replace)) {
regExpCache.type = 'none';
- return StringReplaceRegExpWithFunction(subject, search, replace);
+ if (search.global) {
+ return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+ } else {
+ return StringReplaceNonGlobalRegExpWithFunction(subject,
+ search,
+ replace);
+ }
} else {
return StringReplaceRegExp(subject, search, replace);
}
@@ -396,9 +402,9 @@ function CaptureString(string, lastCaptureInfo, index) {
var scaled = index << 1;
// Compute start and end.
var start = lastCaptureInfo[CAPTURE(scaled)];
+ // If start isn't valid, return undefined.
+ if (start < 0) return;
var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- // If either start or end is missing return undefined.
- if (start < 0 || end < 0) return;
return SubString(string, start, end);
};
@@ -410,9 +416,8 @@ function addCaptureString(builder, matchInfo, index) {
var scaled = index << 1;
// Compute start and end.
var start = matchInfo[CAPTURE(scaled)];
+ if (start < 0) return;
var end = matchInfo[CAPTURE(scaled + 1)];
- // If either start or end is missing return.
- if (start < 0 || end <= start) return;
builder.addSpecialSlice(start, end);
};
@@ -423,112 +428,116 @@ var reusableReplaceArray = $Array(16);
// Helper function for replacing regular expressions with the result of a
// function application in String.prototype.replace.
-function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- if (regexp.global) {
- var resultArray = reusableReplaceArray;
- if (resultArray) {
- reusableReplaceArray = null;
- } else {
- // Inside a nested replace (replace called from the replacement function
- // of another replace) or we have failed to set the reusable array
- // back due to an exception in a replacement function. Create a new
- // array to use in the future, or until the original is written back.
- resultArray = $Array(16);
- }
-
- var res = %RegExpExecMultiple(regexp,
- subject,
- lastMatchInfo,
- resultArray);
- regexp.lastIndex = 0;
- if (IS_NULL(res)) {
- // No matches at all.
- return subject;
- }
- var len = res.length;
- var i = 0;
- if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
- var match_start = 0;
- var override = [null, 0, subject];
- while (i < len) {
- var elem = res[i];
- if (%_IsSmi(elem)) {
- if (elem > 0) {
- match_start = (elem >> 11) + (elem & 0x7ff);
- } else {
- match_start = res[++i] - elem;
- }
+function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
+ var resultArray = reusableReplaceArray;
+ if (resultArray) {
+ reusableReplaceArray = null;
+ } else {
+ // Inside a nested replace (replace called from the replacement function
+ // of another replace) or we have failed to set the reusable array
+ // back due to an exception in a replacement function. Create a new
+ // array to use in the future, or until the original is written back.
+ resultArray = $Array(16);
+ }
+ var res = %RegExpExecMultiple(regexp,
+ subject,
+ lastMatchInfo,
+ resultArray);
+ regexp.lastIndex = 0;
+ if (IS_NULL(res)) {
+ // No matches at all.
+ reusableReplaceArray = resultArray;
+ return subject;
+ }
+ var len = res.length;
+ var i = 0;
+ if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+ var match_start = 0;
+ var override = [null, 0, subject];
+ var receiver = %GetGlobalReceiver();
+ while (i < len) {
+ var elem = res[i];
+ if (%_IsSmi(elem)) {
+ if (elem > 0) {
+ match_start = (elem >> 11) + (elem & 0x7ff);
} else {
- override[0] = elem;
- override[1] = match_start;
- lastMatchInfoOverride = override;
- var func_result = replace.call(null, elem, match_start, subject);
- if (!IS_STRING(func_result)) {
- func_result = NonStringToString(func_result);
- }
- res[i] = func_result;
- match_start += elem.length;
+ match_start = res[++i] - elem;
}
- i++;
+ } else {
+ override[0] = elem;
+ override[1] = match_start;
+ lastMatchInfoOverride = override;
+ var func_result =
+ %_CallFunction(receiver, elem, match_start, subject, replace);
+ if (!IS_STRING(func_result)) {
+ func_result = NonStringToString(func_result);
+ }
+ res[i] = func_result;
+ match_start += elem.length;
}
- } else {
- while (i < len) {
- var elem = res[i];
- if (!%_IsSmi(elem)) {
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp properties.
- lastMatchInfoOverride = elem;
- var func_result = replace.apply(null, elem);
- if (!IS_STRING(func_result)) {
- func_result = NonStringToString(func_result);
- }
- res[i] = func_result;
+ i++;
+ }
+ } else {
+ while (i < len) {
+ var elem = res[i];
+ if (!%_IsSmi(elem)) {
+ // elem must be an Array.
+ // Use the apply argument as backing for global RegExp properties.
+ lastMatchInfoOverride = elem;
+ var func_result = replace.apply(null, elem);
+ if (!IS_STRING(func_result)) {
+ func_result = NonStringToString(func_result);
}
- i++;
+ res[i] = func_result;
}
+ i++;
}
- var resultBuilder = new ReplaceResultBuilder(subject, res);
- var result = resultBuilder.generate();
- resultArray.length = 0;
- reusableReplaceArray = resultArray;
- return result;
- } else { // Not a global regexp, no need to loop.
- var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) return subject;
-
- var result = new ReplaceResultBuilder(subject);
- result.addSpecialSlice(0, matchInfo[CAPTURE0]);
- var endOfMatch = matchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, matchInfo, subject));
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- result.addSpecialSlice(endOfMatch, subject.length);
- return result.generate();
}
+ var resultBuilder = new ReplaceResultBuilder(subject, res);
+ var result = resultBuilder.generate();
+ resultArray.length = 0;
+ reusableReplaceArray = resultArray;
+ return result;
}
-// Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, matchInfo, subject) {
+function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
+ var matchInfo = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(matchInfo)) return subject;
+ var result = new ReplaceResultBuilder(subject);
+ var index = matchInfo[CAPTURE0];
+ result.addSpecialSlice(0, index);
+ var endOfMatch = matchInfo[CAPTURE1];
// Compute the parameter list consisting of the match, captures, index,
// and subject for the replace function invocation.
- var index = matchInfo[CAPTURE0];
// The number of captures plus one for the match.
var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
+ var replacement;
if (m == 1) {
- var s = CaptureString(subject, matchInfo, 0);
+ // No captures, only the match, which is always valid.
+ var s = SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
- return replace.call(null, s, index, subject);
- }
- var parameters = $Array(m + 2);
- for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, matchInfo, j);
+ replacement =
+ %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
+ } else {
+ var parameters = $Array(m + 2);
+ for (var j = 0; j < m; j++) {
+ parameters[j] = CaptureString(subject, matchInfo, j);
+ }
+ parameters[j] = index;
+ parameters[j + 1] = subject;
+
+ replacement = replace.apply(null, parameters);
}
- parameters[j] = index;
- parameters[j + 1] = subject;
- return replace.apply(null, parameters);
+
+ result.add(replacement); // The add method converts to string if necessary.
+ // Can't use matchInfo any more from here, since the function could
+ // overwrite it.
+ result.addSpecialSlice(endOfMatch, subject.length);
+ return result.generate();
}
+
// ECMA-262 section 15.5.4.12
function StringSearch(re) {
var regexp;
diff --git a/deps/v8/src/third_party/dtoa/dtoa.c b/deps/v8/src/third_party/dtoa/dtoa.c
index 8917d9d8bf..178b3d12d1 100644
--- a/deps/v8/src/third_party/dtoa/dtoa.c
+++ b/deps/v8/src/third_party/dtoa/dtoa.c
@@ -164,8 +164,12 @@
*/
#ifndef Long
+#if __LP64__
+#define Long int
+#else
#define Long long
#endif
+#endif
#ifndef ULong
typedef unsigned Long ULong;
#endif
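
dtoa's bignum arithmetic assumes Long and ULong are exactly 32 bits wide; on LP64 targets plain long is 64 bits, which is why the patch maps Long to int there. A small illustrative check of that assumption (not part of dtoa):

#include <cstdio>

int main() {
  // On LP64 systems (e.g. x86-64 Linux/macOS) long is 8 bytes, so Long must
  // map to int to stay 32 bits wide.
  std::printf("sizeof(long) = %zu, sizeof(int) = %zu\n",
              sizeof(long), sizeof(int));
  static_assert(sizeof(int) == 4, "dtoa expects a 32-bit Long");
  return 0;
}
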
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 66a20eeeb4..531bd0ef31 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -225,7 +225,7 @@ function ObjectHasOwnProperty(V) {
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
- if (!IS_OBJECT(V) && !IS_FUNCTION(V) && !IS_UNDETECTABLE(V)) return false;
+ if (!IS_SPEC_OBJECT_OR_NULL(V) && !IS_UNDETECTABLE(V)) return false;
return %IsInPrototypeChain(this, V);
}
@@ -233,7 +233,7 @@ function ObjectIsPrototypeOf(V) {
// ECMA-262 - 15.2.4.6
function ObjectPropertyIsEnumerable(V) {
if (this == null) return false;
- if (!IS_OBJECT(this) && !IS_FUNCTION(this)) return false;
+ if (!IS_SPEC_OBJECT_OR_NULL(this)) return false;
return %IsPropertyEnumerable(this, ToString(V));
}
@@ -279,7 +279,7 @@ function ObjectLookupSetter(name) {
function ObjectKeys(obj) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
return %LocalKeys(obj);
@@ -329,7 +329,7 @@ function FromPropertyDescriptor(desc) {
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
- if (!IS_OBJECT(obj)) {
+ if (!IS_SPEC_OBJECT_OR_NULL(obj)) {
throw MakeTypeError("property_desc_object", [obj]);
}
var desc = new PropertyDescriptor();
@@ -599,7 +599,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
return obj.__proto__;
@@ -608,7 +608,7 @@ function ObjectGetPrototypeOf(obj) {
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
var desc = GetOwnProperty(obj, p);
@@ -618,7 +618,7 @@ function ObjectGetOwnPropertyDescriptor(obj, p) {
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
@@ -660,8 +660,7 @@ function ObjectGetOwnPropertyNames(obj) {
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
- // IS_OBJECT will return true on null covering that case.
- if (!IS_OBJECT(proto) && !IS_FUNCTION(proto)) {
+ if (!IS_SPEC_OBJECT_OR_NULL(proto)) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
var obj = new $Object();
@@ -673,7 +672,7 @@ function ObjectCreate(proto, properties) {
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
var name = ToString(p);
@@ -685,7 +684,7 @@ function ObjectDefineProperty(obj, p, attributes) {
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
var props = ToObject(properties);
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 562f3acd44..b05251f680 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 9
+#define BUILD_NUMBER 11
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/virtual-frame-heavy-inl.h b/deps/v8/src/virtual-frame-heavy-inl.h
index 6381d01266..2755eee648 100644
--- a/deps/v8/src/virtual-frame-heavy-inl.h
+++ b/deps/v8/src/virtual-frame-heavy-inl.h
@@ -31,6 +31,8 @@
#include "type-info.h"
#include "register-allocator.h"
#include "scopes.h"
+#include "register-allocator-inl.h"
+#include "codegen-inl.h"
namespace v8 {
namespace internal {
@@ -147,6 +149,44 @@ void VirtualFrame::Push(Smi* value) {
Push(Handle<Object> (value));
}
+
+int VirtualFrame::register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
+}
+
+
+void VirtualFrame::set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+}
+
+
+bool VirtualFrame::is_used(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
+}
+
+
+void VirtualFrame::SetElementAt(int index, Handle<Object> value) {
+ Result temp(value);
+ SetElementAt(index, &temp);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+}
+
+
+int VirtualFrame::parameter_count() {
+ return cgen()->scope()->num_parameters();
+}
+
+
+int VirtualFrame::local_count() {
+ return cgen()->scope()->num_stack_slots();
+}
+
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_HEAVY_INL_H_
diff --git a/deps/v8/src/virtual-frame-light-inl.h b/deps/v8/src/virtual-frame-light-inl.h
index c50e6c8cf8..17b1c50439 100644
--- a/deps/v8/src/virtual-frame-light-inl.h
+++ b/deps/v8/src/virtual-frame-light-inl.h
@@ -28,13 +28,23 @@
#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
#define V8_VIRTUAL_FRAME_LIGHT_INL_H_
-#include "type-info.h"
+#include "codegen.h"
#include "register-allocator.h"
#include "scopes.h"
+#include "type-info.h"
+
+#include "codegen-inl.h"
+#include "jump-target-light-inl.h"
namespace v8 {
namespace internal {
+VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
+ : element_count_(0),
+ top_of_stack_state_(NO_TOS_REGISTERS),
+ register_allocation_map_(0) { }
+
+
// On entry to a function, the virtual frame already contains the receiver,
// the parameters, and a return address. All frame elements are in memory.
VirtualFrame::VirtualFrame()
@@ -64,6 +74,87 @@ void VirtualFrame::PrepareForReturn() {
}
+VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
+ CodeGenerator* cgen)
+ : cgen_(cgen),
+ old_is_spilled_(SpilledScope::is_spilled_) {
+ SpilledScope::is_spilled_ = false;
+ if (old_is_spilled_) {
+ VirtualFrame* frame = cgen->frame();
+ if (frame != NULL) {
+ frame->AssertIsSpilled();
+ }
+ }
+}
+
+
+VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
+ SpilledScope::is_spilled_ = old_is_spilled_;
+ if (old_is_spilled_) {
+ VirtualFrame* frame = cgen_->frame();
+ if (frame != NULL) {
+ frame->SpillAll();
+ }
+ }
+}
+
+
+CodeGenerator* VirtualFrame::cgen() { return CodeGeneratorScope::Current(); }
+
+
+MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
+
+
+void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
+ if (arg_count != 0) Forget(arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ masm()->CallStub(stub);
+}
+
+
+int VirtualFrame::parameter_count() {
+ return cgen()->scope()->num_parameters();
+}
+
+
+int VirtualFrame::local_count() { return cgen()->scope()->num_stack_slots(); }
+
+
+int VirtualFrame::frame_pointer() { return parameter_count() + 3; }
+
+
+int VirtualFrame::context_index() { return frame_pointer() - 1; }
+
+
+int VirtualFrame::function_index() { return frame_pointer() - 2; }
+
+
+int VirtualFrame::local0_index() { return frame_pointer() + 2; }
+
+
+int VirtualFrame::fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
+}
+
+
+int VirtualFrame::expression_base_index() {
+ return local0_index() + local_count();
+}
+
+
+int VirtualFrame::height() {
+ return element_count() - expression_base_index();
+}
+
+
+MemOperand VirtualFrame::LocalAt(int index) {
+ ASSERT(0 <= index);
+ ASSERT(index < local_count());
+ return MemOperand(fp, kLocal0Offset - index * kPointerSize);
+}
+
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_LIGHT_INL_H_
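
The light virtual frame computes everything from a handful of indices rooted at frame_pointer() = parameter_count() + 3. A small arithmetic sketch of those helpers for a hypothetical function with two parameters and three stack locals (illustrative only; the MemOperand and register plumbing is omitted):

#include <cstdio>

int main() {
  const int parameter_count = 2;
  const int local_count = 3;
  // Same arithmetic as the inline helpers above; values are frame indices,
  // not byte offsets.
  const int frame_pointer = parameter_count + 3;                  // == 5
  const int context_index = frame_pointer - 1;                    // == 4
  const int function_index = frame_pointer - 2;                   // == 3
  const int local0_index = frame_pointer + 2;                     // == 7
  const int expression_base_index = local0_index + local_count;   // == 10
  std::printf("fp=%d context=%d function=%d local0=%d expr_base=%d\n",
              frame_pointer, context_index, function_index,
              local0_index, expression_base_index);
  return 0;
}
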
diff --git a/deps/v8/src/virtual-frame-light.cc b/deps/v8/src/virtual-frame-light.cc
index 27c48a537c..9c019cf7ff 100644
--- a/deps/v8/src/virtual-frame-light.cc
+++ b/deps/v8/src/virtual-frame-light.cc
@@ -46,4 +46,7 @@ Register VirtualFrame::SpillAnyRegister() {
return no_reg;
}
+
+InvalidVirtualFrameInitializer* kInvalidVirtualFrameInitializer = NULL;
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 1c00ebca4b..3d03949f25 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "macro-assembler.h"
#include "serialize.h"
@@ -458,19 +460,36 @@ void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
+ ASSERT((opcode & 0xC6) == 2);
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ // Swap reg and rm_reg and change opcode operand order.
+ emit_rex_64(rm_reg, reg);
+ emit(opcode ^ 0x02);
+ emit_modrm(rm_reg, reg);
+ } else {
+ emit_rex_64(reg, rm_reg);
+ emit(opcode);
+ emit_modrm(reg, rm_reg);
+ }
}
void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
+ ASSERT((opcode & 0xC6) == 2);
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ // Swap reg and rm_reg and change opcode operand order.
+ emit(0x66);
+ emit_optional_rex_32(rm_reg, reg);
+ emit(opcode ^ 0x02);
+ emit_modrm(rm_reg, reg);
+ } else {
+ emit(0x66);
+ emit_optional_rex_32(reg, rm_reg);
+ emit(opcode);
+ emit_modrm(reg, rm_reg);
+ }
}
@@ -489,9 +508,17 @@ void Assembler::arithmetic_op_16(byte opcode,
void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
+ ASSERT((opcode & 0xC6) == 2);
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ // Swap reg and rm_reg and change opcode operand order.
+ emit_optional_rex_32(rm_reg, reg);
+ emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
+ emit_modrm(rm_reg, reg);
+ } else {
+ emit_optional_rex_32(reg, rm_reg);
+ emit(opcode);
+ emit_modrm(reg, rm_reg);
+ }
}
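
The reg/rm swap relies on the x86 direction bit: the register-to-register ALU opcodes come in pairs that differ only in bit 1, e.g. 0x03 (ADD r32, r/m32) versus 0x01 (ADD r/m32, r32), so opcode ^ 0x02 selects the reversed operand order when rm_reg would otherwise force a SIB byte. An illustrative standalone check (not part of the patch):

#include <cassert>

int main() {
  // Direction-bit pairs for the register/register arithmetic forms:
  // 0x03 (ADD r,r/m) <-> 0x01 (ADD r/m,r)
  // 0x2B (SUB r,r/m) <-> 0x29 (SUB r/m,r)
  // 0x3B (CMP r,r/m) <-> 0x39 (CMP r/m,r)
  const unsigned char opcodes[] = {0x03, 0x2B, 0x3B};
  for (unsigned char op : opcodes) {
    assert((op & 0xC6) == 2);   // Same sanity check the assembler asserts.
    unsigned char swapped = op ^ 0x02;
    assert(swapped == op - 2);  // Flipping bit 1 picks the reversed form.
  }
  return 0;
}
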
@@ -1290,9 +1317,15 @@ void Assembler::movl(Register dst, const Operand& src) {
void Assembler::movl(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
+ if (src.low_bits() == 4) {
+ emit_optional_rex_32(src, dst);
+ emit(0x89);
+ emit_modrm(src, dst);
+ } else {
+ emit_optional_rex_32(dst, src);
+ emit(0x8B);
+ emit_modrm(dst, src);
+ }
}
@@ -1337,9 +1370,15 @@ void Assembler::movq(Register dst, const Operand& src) {
void Assembler::movq(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
+ if (src.low_bits() == 4) {
+ emit_rex_64(src, dst);
+ emit(0x89);
+ emit_modrm(src, dst);
+ } else {
+ emit_rex_64(dst, src);
+ emit(0x8B);
+ emit_modrm(dst, src);
+ }
}
@@ -1860,6 +1899,10 @@ void Assembler::xchg(Register dst, Register src) {
Register other = src.is(rax) ? dst : src;
emit_rex_64(other);
emit(0x90 | other.low_bits());
+ } else if (dst.low_bits() == 4) {
+ emit_rex_64(dst, src);
+ emit(0x87);
+ emit_modrm(dst, src);
} else {
emit_rex_64(src, dst);
emit(0x87);
@@ -1885,12 +1928,18 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (dst.code() > 3 || src.code() > 3) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
+ if (src.low_bits() == 4) {
+ emit_rex_32(src, dst);
+ emit(0x84);
+ emit_modrm(src, dst);
+ } else {
+ if (dst.code() > 3 || src.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst, src);
+ }
+ emit(0x84);
+ emit_modrm(dst, src);
}
- emit(0x84);
- emit_modrm(dst, src);
}
@@ -1941,9 +1990,15 @@ void Assembler::testb(const Operand& op, Register reg) {
void Assembler::testl(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
+ if (src.low_bits() == 4) {
+ emit_optional_rex_32(src, dst);
+ emit(0x85);
+ emit_modrm(src, dst);
+ } else {
+ emit_optional_rex_32(dst, src);
+ emit(0x85);
+ emit_modrm(dst, src);
+ }
}
@@ -1994,9 +2049,15 @@ void Assembler::testq(const Operand& op, Register reg) {
void Assembler::testq(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
+ if (src.low_bits() == 4) {
+ emit_rex_64(src, dst);
+ emit(0x85);
+ emit_modrm(src, dst);
+ } else {
+ emit_rex_64(dst, src);
+ emit(0x85);
+ emit_modrm(dst, src);
+ }
}
@@ -2510,6 +2571,17 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
}
+void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2728,4 +2800,16 @@ const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::INTERNAL_REFERENCE |
1 << RelocInfo::JS_RETURN;
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on x64 means that it is a relative 32 bit address, as used
+ // by branch instructions.
+ return (1 << rmode_) & kApplyMask;
+}
+
+
+
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
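
RelocInfo::IsCodedSpecially boils down to a one-bit set-membership test: kApplyMask holds one bit per relocation mode and (1 << rmode_) & kApplyMask checks whether the current mode is in the set. A standalone sketch of that bitmask-as-set pattern (the mode values below are invented for illustration, not V8's):

#include <cassert>

// Hypothetical relocation modes; only the bitmask technique mirrors the patch.
enum Mode { kCodeTarget = 0, kRuntimeEntry = 1, kInternalReference = 2, kNone = 3 };

static const int kApplyMask =
    (1 << kCodeTarget) | (1 << kRuntimeEntry) | (1 << kInternalReference);

static bool IsCodedSpecially(Mode mode) {
  return ((1 << mode) & kApplyMask) != 0;
}

int main() {
  assert(IsCodedSpecially(kCodeTarget));
  assert(!IsCodedSpecially(kNone));
  return 0;
}
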
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index d0778658ba..b55a7b7263 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -567,11 +567,7 @@ class Assembler : public Malloced {
// Arithmetics
void addl(Register dst, Register src) {
- if (dst.low_bits() == 4) { // Forces SIB byte.
- arithmetic_op_32(0x01, src, dst);
- } else {
- arithmetic_op_32(0x03, dst, src);
- }
+ arithmetic_op_32(0x03, dst, src);
}
void addl(Register dst, Immediate src) {
@@ -606,6 +602,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x0, dst, src);
}
+ void sbbl(Register dst, Register src) {
+ arithmetic_op_32(0x1b, dst, src);
+ }
+
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -1092,6 +1092,7 @@ class Assembler : public Malloced {
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2siq(Register dst, XMMRegister src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
void cvtlsi2sd(XMMRegister dst, Register src);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index c55a4ea576..8099febb7f 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "macro-assembler.h"
@@ -1296,3 +1299,5 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 740be83c2f..6f6670aa3b 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
@@ -277,7 +279,6 @@ class FloatingPointHelper : public AllStatic {
// Takes the operands in rdx and rax and loads them as integers in rax
// and rcx.
static void LoadAsIntegers(MacroAssembler* masm,
- bool use_sse3,
Label* operand_conversion_failure);
};
@@ -679,11 +680,51 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
void DeferredReferenceSetKeyedValue::Generate() {
__ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Push receiver and key arguments on the stack.
- __ push(receiver_);
- __ push(key_);
- // Move value argument to eax as expected by the IC stub.
- if (!value_.is(rax)) __ movq(rax, value_);
+ // Move value, receiver, and key to registers rax, rdx, and rcx, as
+ // the IC stub expects.
+ // Move value to rax, using xchg if the receiver or key is in rax.
+ if (!value_.is(rax)) {
+ if (!receiver_.is(rax) && !key_.is(rax)) {
+ __ movq(rax, value_);
+ } else {
+ __ xchg(rax, value_);
+ // Update receiver_ and key_ if they are affected by the swap.
+ if (receiver_.is(rax)) {
+ receiver_ = value_;
+ } else if (receiver_.is(value_)) {
+ receiver_ = rax;
+ }
+ if (key_.is(rax)) {
+ key_ = value_;
+ } else if (key_.is(value_)) {
+ key_ = rax;
+ }
+ }
+ }
+ // Value is now in rax. Its original location is remembered in value_,
+ // and the value is restored to value_ before returning.
+ // The variables receiver_ and key_ are not preserved.
+ // Move receiver and key to rdx and rcx, swapping if necessary.
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rcx)) {
+ __ movq(rcx, key_);
+ } // Else everything is already in the right place.
+ } else if (receiver_.is(rcx)) {
+ if (key_.is(rdx)) {
+ __ xchg(rcx, rdx);
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rcx, key_);
+ }
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rcx, key_);
+ __ movq(rdx, receiver_);
+ }
+
// Call the IC stub.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -696,11 +737,8 @@ void DeferredReferenceSetKeyedValue::Generate() {
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC), key and receiver
- // registers.
+ // Restore value (returned from store IC).
if (!value_.is(rax)) __ movq(value_, rax);
- __ pop(key_);
- __ pop(receiver_);
}
@@ -1547,7 +1585,7 @@ void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
}
Result var = frame_->Pop();
var.ToRegister();
- __ AbortIfNotSmi(var.reg(), "Non-smi value in smi-typed stack slot.");
+ __ AbortIfNotSmi(var.reg());
}
}
@@ -2800,6 +2838,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval.
@@ -2849,6 +2888,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Push the name of the function on the frame.
@@ -2868,59 +2908,26 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// JavaScript examples:
//
- // with (obj) foo(1, 2, 3) // foo is in obj
+ // with (obj) foo(1, 2, 3) // foo may be in obj.
//
// function f() {};
// function g() {
// eval(...);
- // f(); // f could be in extension object
+ // f(); // f could be in extension object.
// }
// ----------------------------------
- JumpTarget slow;
- JumpTarget done;
-
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
+ JumpTarget slow, done;
Result function;
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
- function = LoadFromGlobalSlotCheckExtensions(var->slot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&function);
- LoadGlobalReceiver();
- done.Jump();
-
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = var->local_if_not_shadowed()->slot();
- // Only generate the fast case for locals that rewrite to slots.
- // This rules out argument loads because eval forces arguments
- // access to be through the arguments object.
- if (potential_slot != NULL) {
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- function = allocator()->Allocate();
- ASSERT(function.is_valid());
- __ movq(function.reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- function,
- &slow));
- JumpTarget push_function_and_receiver;
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(function.reg(), Heap::kTheHoleValueRootIndex);
- push_function_and_receiver.Branch(not_equal, &function);
- __ LoadRoot(function.reg(), Heap::kUndefinedValueRootIndex);
- }
- push_function_and_receiver.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- done.Jump();
- }
- }
+
+ // Generate fast case for loading functions from slots that
+ // correspond to local/global variables or arguments unless they
+ // are shadowed by eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &function,
+ &slow,
+ &done);
slow.Bind();
// Load the function from the context. Sync the frame so we can
@@ -2941,7 +2948,18 @@ void CodeGenerator::VisitCall(Call* node) {
ASSERT(!allocator()->is_used(rdx));
frame_->EmitPush(rdx);
- done.Bind();
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ JumpTarget call;
+ call.Jump();
+ done.Bind(&function);
+ frame_->Push(&function);
+ LoadGlobalReceiver();
+ call.Bind();
+ }
+
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@@ -2976,6 +2994,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Push the name of the function onto the frame.
@@ -3422,7 +3441,11 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
new_value.type_info());
}
- __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
+ if (new_value.is_smi()) {
+ if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
+ } else {
+ __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
+ }
if (is_increment) {
__ SmiAddConstant(kScratchRegister,
new_value.reg(),
@@ -3856,11 +3879,13 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(less);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ movzxbq(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ destination()->false_target()->Branch(below);
+ __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
obj.Unuse();
- destination()->Split(less_equal);
+ destination()->Split(below_equal);
}
@@ -3944,7 +3969,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
__ bind(&exit);
result.set_type_info(TypeInfo::Smi());
if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg(), "Computed arguments.length is not a smi.");
+ __ AbortIfNotSmi(result.reg());
}
frame_->Push(&result);
}
@@ -4352,7 +4377,7 @@ void CodeGenerator::GenerateRandomHeapNumber(
__ PrepareCallCFunction(0);
__ CallCFunction(ExternalReference::random_uint32_function(), 0);
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // Convert 32 random bits in rax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
__ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
@@ -5123,10 +5148,9 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
value.ToRegister();
if (value.is_number()) {
- Comment cmnt(masm_, "ONLY_NUMBER");
// Fast case if TypeInfo indicates only numbers.
if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+ __ AbortIfNotNumber(value.reg());
}
// Smi => false iff zero.
__ SmiCompare(value.reg(), Smi::FromInt(0));
@@ -5225,6 +5249,11 @@ void CodeGenerator::LoadReference(Reference* ref) {
// The expression is a variable proxy that does not rewrite to a
// property. Global variables are treated as named property references.
if (var->is_global()) {
+ // If rax is free, the register allocator prefers it. Thus the code
+ // generator will load the global object into rax, which is where
+ // LoadIC wants it. Most uses of Reference call LoadIC directly
+ // after the reference is created.
+ frame_->Spill(rax);
LoadGlobal();
ref->set_type(Reference::NAMED);
} else {
@@ -5336,47 +5365,14 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
JumpTarget done;
Result value;
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
- // If there was no control flow to slow, we can exit early.
- if (!slow.is_linked()) {
- frame_->Push(&value);
- return;
- }
-
- done.Jump(&value);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
- // Only generate the fast case for locals that rewrite to slots.
- // This rules out argument loads because eval forces arguments
- // access to be through the arguments object.
- if (potential_slot != NULL) {
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- value = allocator_->Allocate();
- ASSERT(value.is_valid());
- __ movq(value.reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- value,
- &slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- done.Branch(not_equal, &value);
- __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- }
- // There is always control flow to slow from
- // ContextSlotOperandCheckExtensions so we have to jump around
- // it.
- done.Jump(&value);
- }
- }
+ // Generate fast case for loading from slots that correspond to
+ // local/global variables or arguments unless they are shadowed by
+ // eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(slot,
+ typeof_state,
+ &value,
+ &slow,
+ &done);
slow.Bind();
// A runtime call is inevitable. We eagerly sync frame elements
@@ -5642,6 +5638,71 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
}
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ done->Jump(result);
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ *result = allocator_->Allocate();
+ ASSERT(result->is_valid());
+ __ movq(result->reg(),
+ ContextSlotOperandCheckExtensions(potential_slot,
+ *result,
+ slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
+ done->Branch(not_equal, result);
+ __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
+ }
+ done->Jump(result);
+ } else if (rewrite != NULL) {
+ // Generate fast case for argument loads.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ Result arguments = allocator()->Allocate();
+ ASSERT(arguments.is_valid());
+ __ movq(arguments.reg(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ arguments,
+ slow));
+ frame_->Push(&arguments);
+ frame_->Push(key_literal->handle());
+ *result = EmitKeyedLoad(false);
+ frame_->Drop(2); // Drop key and receiver.
+ done->Jump(result);
+ }
+ }
+ }
+ }
+}
+
+
void CodeGenerator::LoadGlobal() {
if (in_spilled_code()) {
frame_->EmitPush(GlobalObject());
@@ -5862,7 +5923,7 @@ void CodeGenerator::Comparison(AstNode* node,
if (left_side.is_smi()) {
if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_side.reg(), "Non-smi value inferred as smi.");
+ __ AbortIfNotSmi(left_side.reg());
}
} else {
Condition left_is_smi = masm_->CheckSmi(left_side.reg());
@@ -6734,8 +6795,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
Condition is_smi = masm_->CheckSmi(operand->reg());
deferred->Branch(NegateCondition(is_smi));
} else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg(),
- "Static type info claims non-smi is smi in (const SHL smi).");
+ __ AbortIfNotSmi(operand->reg());
}
__ Move(answer.reg(), smi_value);
@@ -6997,7 +7057,43 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->reg(),
rcx,
overwrite_mode);
- __ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
+
+ Label do_op;
+ if (right_type_info.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(right->reg());
+ }
+ __ movq(answer.reg(), left->reg());
+ // If left is not known to be a smi, check if it is.
+ // If left is not known to be a number, and it isn't a smi, check if
+ // it is a HeapNumber.
+ if (!left_type_info.IsSmi()) {
+ __ JumpIfSmi(answer.reg(), &do_op);
+ if (!left_type_info.IsNumber()) {
+ // Branch if not a heapnumber.
+ __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ deferred->Branch(not_equal);
+ }
+ // Load integer value into answer register using truncation.
+ __ cvttsd2si(answer.reg(),
+ FieldOperand(answer.reg(), HeapNumber::kValueOffset));
+ // Branch if we might have overflowed.
+ // (False negative for Smi::kMinValue)
+ __ cmpq(answer.reg(), Immediate(0x80000000));
+ deferred->Branch(equal);
+ // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
+ __ Integer32ToSmi(answer.reg(), answer.reg());
+ } else {
+ // Fast case - both are actually smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left->reg());
+ }
+ }
+ } else {
+ __ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
+ }
+ __ bind(&do_op);
// Perform the operation.
switch (op) {
@@ -7455,7 +7551,7 @@ void Reference::SetValue(InitState init_state) {
if (!key.is_smi()) {
__ JumpIfNotSmi(key.reg(), deferred->entry_label());
} else if (FLAG_debug_code) {
- __ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
+ __ AbortIfNotSmi(key.reg());
}
// Check that the receiver is a JSArray.
@@ -7510,8 +7606,6 @@ void Reference::SetValue(InitState init_state) {
deferred->BindExit();
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
cgen_->frame()->Push(&value);
} else {
Result answer = cgen_->frame()->CallKeyedStoreIC();
@@ -7522,7 +7616,7 @@ void Reference::SetValue(InitState init_state) {
masm->nop();
cgen_->frame()->Push(&answer);
}
- cgen_->UnloadReference(this);
+ set_unloaded();
break;
}
@@ -8015,138 +8109,71 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
}
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
-// trashed registers.
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
- Label done, right_exponent, normal_exponent;
- Register scratch = rbx;
- Register scratch2 = rdi;
- // Get exponent word.
- __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ movl(scratch2, scratch);
- __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmpl(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(rsp, 0));
- __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
- __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load rcx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(rcx, rcx);
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmpl(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmpl(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ movl(scratch2, scratch);
- __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, Immediate(big_shift_distance));
- // Get the second half of the double.
- __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(rcx, Immediate(32 - big_shift_distance));
- __ or_(rcx, scratch2);
- // We have the answer in rcx, but we may need to negate it.
- __ testl(scratch, scratch);
- __ j(positive, &done);
- __ neg(rcx);
- __ jmp(&done);
- }
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+ // Double to remove the sign bit, shift the exponent down to the least significant bits,
+ // and subtract bias to get the unshifted, unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done);
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in rcx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ subl(scratch2, Immediate(zero_exponent));
- // rcx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
- __ movl(rcx, Immediate(30));
- __ subl(rcx, scratch2);
-
- __ bind(&right_exponent);
- // Here rcx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, Immediate(shift_distance));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, Immediate(32 - shift_distance));
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to rcx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(rcx, rcx);
- __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ movl(rcx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ subl(rcx, scratch2);
- __ bind(&done);
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If the exponent is negative or above 83, the number has no significant bits
+ // in the range 0..2^31, so the result is zero and the result register, which
+ // was cleared above, already holds it.
+ __ j(above, &done);
+
+ // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+ // If the value was negative, result is now -1, so this computes
+ // (double_value - 1) ^ -1; otherwise it computes (double_value - 0) ^ 0.
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
+ } else {
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
}
+
+ __ bind(&done);
}
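For reference, the conversion strategy used by IntegerConvert above can be written as a small standalone C++ sketch (assuming IEEE-754 doubles and ECMAScript ToInt32-style truncation to the low 32 bits; the helper name is illustrative, not a V8 API):

#include <stdint.h>
#include <string.h>

// Truncate a double to its low 32 integer bits, mirroring the stub's two
// paths: a direct 64-bit truncation for exponents 0..62, and a mantissa
// shift for exponents 63..83. Everything else (tiny values, NaN, infinity,
// exponents above 83) produces zero.
static int32_t DoubleToInt32Sketch(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  // Unbiased exponent: the 11 bits below the sign bit, minus the bias of 1023.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 63) {
    // Fits a signed 64-bit truncation (the cvttsd2siq fast path); values with
    // a negative exponent truncate to zero here as well.
    return static_cast<int32_t>(static_cast<int64_t>(d));
  }
  if (exponent > 83) {
    // No mantissa bit lands below 2^32 (this also covers NaN and infinity).
    return 0;
  }
  // Exponent 63..83: shift the mantissa (with the implicit leading 1) so the
  // bits with positional value below 2^32 end up in the low word.
  uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  uint32_t result = (bits >> 63) ? (0u - low) : low;  // Apply the sign mod 2^32.
  return static_cast<int32_t>(result);
}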
@@ -8196,14 +8223,11 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
// Convert the heap number in rax to an untagged integer in rax.
- IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
+ IntegerConvert(masm, rax, rax);
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(rcx);
- // Tag the result as a smi and we're done.
- ASSERT(kSmiTagSize == 1);
- __ Integer32ToSmi(rax, rcx);
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
}
// Return from the stub.
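The Integer32ToSmi used here relies on the x64 smi representation, where the 32-bit payload lives in the upper half of the word. A minimal sketch of the tagging step, assuming kSmiShift == 32 and kSmiTag == 0 as in this port (illustrative helpers, not V8 code):

#include <stdint.h>

// Tag an untagged 32-bit integer as an x64 smi: shift the payload into the
// upper 32 bits and leave the low (tag) bits zero.
static inline int64_t Integer32ToSmiSketch(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}

// Reading it back is the inverse shift (arithmetic, to keep the sign).
static inline int32_t SmiToInteger32Sketch(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}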
@@ -8954,6 +8978,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ frame_->SpillTop();
}
// Record the position for debugging purposes.
@@ -9777,7 +9802,6 @@ void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- bool use_sse3,
Label* conversion_failure) {
// Check float operands.
Label arg1_is_object, check_undefined_arg1;
@@ -9800,10 +9824,9 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the rdx heap number in rdx.
- IntegerConvert(masm, rdx, use_sse3, conversion_failure);
- __ movl(rdx, rcx);
+ IntegerConvert(masm, rdx, rdx);
- // Here edx has the untagged integer, eax has a Smi or a heap number.
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
__ JumpIfNotSmi(rax, &arg2_is_object);
@@ -9823,7 +9846,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rax, use_sse3, conversion_failure);
+ IntegerConvert(masm, rcx, rax);
__ bind(&done);
__ movl(rax, rdx);
}
@@ -9892,13 +9915,12 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
- use_sse3_ ? "SSE3" : "SSE2",
static_operands_type_.ToString(),
BinaryOpIC::GetName(runtime_operands_type_));
return name_;
@@ -10073,8 +10095,8 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
if (static_operands_type_.IsSmi()) {
// Skip smi check if we know that both arguments are smis.
if (FLAG_debug_code) {
- __ AbortIfNotSmi(left, "Static type check claimed non-smi is smi.");
- __ AbortIfNotSmi(right, "Static type check claimed non-smi is smi.");
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
}
if (op_ == Token::BIT_OR) {
// Handle OR here, since we do extra smi-checking in the or code below.
@@ -10257,8 +10279,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// rdx: x
if (static_operands_type_.IsNumber() && FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
- __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
} else {
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
}
@@ -10331,7 +10353,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHL:
case Token::SHR: {
Label skip_allocation, non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+ FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -10342,7 +10364,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
- // Check if result is non-negative. This can only happen for a shift
+ // Check if result is negative. This can only happen for a shift
// by zero, which also doesn't update the sign flag.
__ testl(rax, rax);
__ j(negative, &non_smi_result);
@@ -11648,3 +11670,5 @@ ModuloFunction CreateModuloFunction() {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 5d9861ba65..01bbd20246 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -28,7 +28,9 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
+#include "ast.h"
#include "ic-inl.h"
+#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
@@ -433,6 +435,16 @@ class CodeGenerator: public AstVisitor {
TypeofState typeof_state,
JumpTarget* slow);
+ // Support for loading from local/global variables and arguments
+ // whose location is known unless they are shadowed by
+ // eval-introduced bindings. Generates no code for unsupported slot
+ // types and therefore expects to fall through to the slow jump target.
+ void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done);
+
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
@@ -711,7 +723,6 @@ class GenericBinaryOpStub: public CodeStub {
static_operands_type_(operands_type),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -721,7 +732,6 @@ class GenericBinaryOpStub: public CodeStub {
flags_(FlagBits::decode(key)),
args_in_registers_(ArgsInRegistersBits::decode(key)),
args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
static_operands_type_(TypeInfo::ExpandedRepresentation(
StaticTypeInfoBits::decode(key))),
runtime_operands_type_(type_info),
@@ -746,7 +756,6 @@ class GenericBinaryOpStub: public CodeStub {
GenericBinaryFlags flags_;
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
- bool use_sse3_;
// Number type information of operands, determined by code generator.
TypeInfo static_operands_type_;
@@ -772,15 +781,14 @@ class GenericBinaryOpStub: public CodeStub {
}
#endif
- // Minor key encoding in 18 bits TTNNNFRASOOOOOOOMM.
+ // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+ class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
+ class ArgsReversedBits: public BitField<bool, 10, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 12, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
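With the SSE3 bit gone, the field widths above account for the 17 bits in the key: 2 + 7 + 1 + 1 + 1 + 3 + 2 = 17. A minimal sketch of the packing these BitField declarations describe, with shifts and widths taken directly from them (the function is illustrative, not V8 code):

// Pack the stub's parameters into the 17-bit minor key "TTNNNFRAOOOOOOOMM".
static int MinorKeySketch(int mode, int op, bool args_in_registers,
                          bool args_reversed, int flags,
                          int static_type_info, int runtime_type_info) {
  return (mode              << 0)    // ModeBits:            bits 0..1   (MM)
       | (op                << 2)    // OpBits:              bits 2..8   (OOOOOOO)
       | (args_in_registers << 9)    // ArgsInRegistersBits: bit 9       (A)
       | (args_reversed     << 10)   // ArgsReversedBits:    bit 10      (R)
       | (flags             << 11)   // FlagBits:            bit 11      (F)
       | (static_type_info  << 12)   // StaticTypeInfoBits:  bits 12..14 (NNN)
       | (runtime_type_info << 15);  // RuntimeTypeInfoBits: bits 15..16 (TT)
}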
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
@@ -788,7 +796,6 @@ class GenericBinaryOpStub: public CodeStub {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
| StaticTypeInfoBits::encode(
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index cc20c58a3f..a43a02bb82 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -33,6 +33,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "cpu.h"
#include "macro-assembler.h"
@@ -77,3 +79,5 @@ void CPU::DebugBreak() {
}
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 5470912a3d..89b98f14f5 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -28,6 +28,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "debug.h"
@@ -132,10 +134,10 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
// -----------------------------------
- // Register rax contains an object that needs to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
}
@@ -216,3 +218,5 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index bd912cdd22..44ffe5fb5e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -30,6 +30,9 @@
#include <stdarg.h>
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
#include "disasm.h"
namespace disasm {
@@ -1671,3 +1674,5 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
}
} // namespace disasm
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
index 5e76901305..13eef0309c 100644
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ b/deps/v8/src/x64/fast-codegen-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "scopes.h"
@@ -244,3 +246,5 @@ void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 6a0527cf6d..85ebc9586b 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "frames-inl.h"
namespace v8 {
@@ -107,3 +109,5 @@ byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index a34a94ea07..81424f6611 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -79,11 +81,17 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
bool function_in_register = true;
// Possibly allocate a local context.
- if (scope()->num_heap_slots() > 0) {
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
- __ CallRuntime(Runtime::kNewContext, 1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
function_in_register = false;
// Context is returned in both rax and rsi. It replaces the context
// passed to us. It's saved in the stack and kept live in rsi.
@@ -143,7 +151,18 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
}
{ Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ // Visit all the explicit declarations unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ scope()->VisitIllegalRedeclaration(this);
+ } else {
+ VisitDeclarations(scope()->declarations());
+ }
}
{ Comment cmnt(masm_, "[ Stack check");
@@ -427,6 +446,39 @@ void FullCodeGenerator::DropAndApply(int count,
}
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ // In an effect context, the true and the false case branch to the
+ // same label.
+ *if_true = *if_false = materialize_true;
+ break;
+ case Expression::kValue:
+ *if_true = materialize_true;
+ *if_false = materialize_false;
+ break;
+ case Expression::kTest:
+ *if_true = true_label_;
+ *if_false = false_label_;
+ break;
+ case Expression::kValueTest:
+ *if_true = materialize_true;
+ *if_false = false_label_;
+ break;
+ case Expression::kTestValue:
+ *if_true = true_label_;
+ *if_false = materialize_false;
+ break;
+ }
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
@@ -492,6 +544,61 @@ void FullCodeGenerator::Apply(Expression::Context context,
}
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ switch (location_) {
+ case kAccumulator:
+ __ LoadRoot(result_register(), value_root_index);
+ break;
+ case kStack:
+ __ PushRoot(value_root_index);
+ break;
+ }
+ break;
+ }
+ case Expression::kTest:
+ __ jmp(flag ? true_label_ : false_label_);
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ // If value is false it's needed.
+ if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ break;
+ case kStack:
+ // If value is false it's needed.
+ if (!flag) __ PushRoot(Heap::kFalseValueRootIndex);
+ break;
+ }
+ __ jmp(flag ? true_label_ : false_label_);
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ // If value is true it's needed.
+ if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ break;
+ case kStack:
+ // If value is true it's needed.
+ if (flag) __ PushRoot(Heap::kTrueValueRootIndex);
+ break;
+ }
+ __ jmp(flag ? true_label_ : false_label_);
+ break;
+ }
+}
+
+
void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
@@ -667,22 +774,23 @@ void FullCodeGenerator::Move(Slot* dst,
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+ Variable::Mode mode,
+ FunctionLiteral* function) {
Comment cmnt(masm_, "[ Declaration");
- Variable* var = decl->proxy()->var();
- ASSERT(var != NULL); // Must have been resolved.
- Slot* slot = var->slot();
- Property* prop = var->AsProperty();
+ ASSERT(variable != NULL); // Must have been resolved.
+ Slot* slot = variable->slot();
+ Property* prop = variable->AsProperty();
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
+ if (mode == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
- } else if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kAccumulator);
+ } else if (function != NULL) {
+ VisitForValue(function, kAccumulator);
__ movq(Operand(rbp, SlotOffset(slot)), result_register());
}
break;
@@ -692,7 +800,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// this specific context.
// The variable in the decl always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ movq(rbx,
@@ -700,13 +808,13 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
__ cmpq(rbx, rsi);
__ Check(equal, "Unexpected declaration in current context.");
}
- if (decl->mode() == Variable::CONST) {
+ if (mode == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
kScratchRegister);
// No write barrier since the hole value is in old space.
- } else if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kAccumulator);
+ } else if (function != NULL) {
+ VisitForValue(function, kAccumulator);
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
result_register());
int offset = Context::SlotOffset(slot->index());
@@ -717,21 +825,19 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
case Slot::LOOKUP: {
__ push(rsi);
- __ Push(var->name());
+ __ Push(variable->name());
// Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+ PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
+ if (mode == Variable::CONST) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
- } else if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kStack);
+ } else if (function != NULL) {
+ VisitForValue(function, kStack);
} else {
__ Push(Smi::FromInt(0)); // no initial value!
}
@@ -741,32 +847,36 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
}
} else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
-
- if (decl->fun() != NULL) {
- VisitForValue(decl->fun(), kAccumulator);
+ if (function != NULL) {
+ VisitForValue(prop->key(), kStack);
+ VisitForValue(function, kAccumulator);
+ __ pop(rcx);
} else {
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rcx, result_register());
__ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
}
+ __ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Absence of a test rax instruction following the call
// indicates that none of the load was inlined.
__ nop();
-
- // Value in rax is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
}
}
}
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
@@ -777,19 +887,226 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+ // Keep the switch value on the stack until a case matches.
+ VisitForValue(stmt->tag(), kStack);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForValue(clause->label(), kAccumulator);
+
+ // Perform the comparison as if via '==='. The comparison stub expects
+ // the smi vs. smi case to be handled before it is called.
+ Label slow_case;
+ __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ JumpIfNotBothSmi(rdx, rax, &slow_case);
+ __ SmiCompare(rdx, rax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+
+ __ bind(&slow_case);
+ CompareStub stub(equal, true);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ jmp(nested_statement.break_target());
+ } else {
+ __ jmp(default_clause->body_target()->entry_label());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target()->entry_label());
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. Both SpiderMonkey and JSC
+ // ignore null and undefined in contrast to the specification; see
+ // ECMA-262 section 12.6.4.
+ VisitForValue(stmt->enumerable(), kAccumulator);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &exit);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &exit);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(rax, &convert);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(rax);
+
+ // TODO(kasperl): Check cache validity in generated code. This is a
+ // fast case for the JSObject::IsSimpleEnum cache validity
+ // checks. If we cannot guarantee cache validity, call the runtime
+ // system to check cache validity or get the property names in a
+ // fixed array.
+
+ // Get the set of properties to enumerate.
+ __ push(rax); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kMetaMapRootIndex);
+ __ j(not_equal, &fixed_array);
+
+ // We got a map in register rax. Get the enumeration cache from it.
+ __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
+ __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Setup the four remaining stack slots.
+ __ push(rax); // Map.
+ __ push(rdx); // Enumeration cache.
+ __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax); // Enumeration cache length (as smi).
+ __ Push(Smi::FromInt(0)); // Initial index.
+ __ jmp(&loop);
+
+ // We got a fixed array in register rax. Iterate through that.
+ __ bind(&fixed_array);
+ __ Push(Smi::FromInt(0)); // Map (0) - force slow check.
+ __ push(rax);
+ __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax); // Fixed array length (as smi).
+ __ Push(Smi::FromInt(0)); // Initial index.
+
+ // Generate code for doing the condition check.
+ __ bind(&loop);
+ __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
+ __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ j(above_equal, loop_statement.break_target());
+
+ // Get the current entry of the array into register rbx.
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ SmiIndex index = __ SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rbx, FieldOperand(rbx,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case into register rdx.
+ __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ Label update_each;
+ __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+ __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ j(equal, &update_each);
+
+ // Convert the entry to a string or null if it isn't a property
+ // anymore. If the property has been removed while iterating, we
+ // just skip it.
+ __ push(rcx); // Enumerable.
+ __ push(rbx); // Current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.continue_target());
+ __ movq(rbx, rax);
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register rbx.
+ __ bind(&update_each);
+ __ movq(result_register(), rbx);
+ // Perform the assignment as if via '='.
+ EmitAssignment(stmt->each());
+
+ // Generate code for the body of the loop.
+ Label stack_limit_hit, stack_check_done;
+ Visit(stmt->body());
+
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_done);
+
+ // Generate code for going to the next element by incrementing the
+ // index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_target());
+ __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
+ __ jmp(&loop);
+
+ // Slow case for the stack limit check.
+ StackCheckStub stack_check_stub;
+ __ bind(&stack_limit_hit);
+ __ CallStub(&stack_check_stub);
+ __ jmp(&stack_check_done);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_target());
+ __ addq(rsp, Immediate(5 * kPointerSize));
+
+ // Exit and decrement the loop depth.
+ __ bind(&exit);
+ decrement_loop_depth();
+}
- // Build the shared function info and instantiate the function based
- // on it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(expr, script(), this);
- if (HasStackOverflow()) return;
- // Create a new closure.
- __ push(rsi);
- __ Push(function_info);
- __ CallRuntime(Runtime::kNewClosure, 2);
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && info->num_literals() == 0) {
+ FastNewClosureStub stub;
+ __ Push(info);
+ __ CallStub(&stub);
+ } else {
+ __ push(rsi);
+ __ Push(info);
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ }
Apply(context_, rax);
}
@@ -833,7 +1150,20 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
- Apply(context, slot);
+ if (var->mode() == Variable::CONST) {
+ // Constants may be the hole value if they have not been initialized.
+ // Unhole them.
+ Label done;
+ MemOperand slot_operand = EmitSlotSearch(slot, rax);
+ __ movq(rax, slot_operand);
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ Apply(context, rax);
+ } else {
+ Apply(context, slot);
+ }
} else {
Comment cmnt(masm_, "Rewritten parameter");
@@ -969,22 +1299,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_elements());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(length);
+ __ CallStub(&stub);
}
bool result_saved = false; // Is the result saved to the stack?
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
+ for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
@@ -1019,7 +1355,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() != Token::INIT_CONST);
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -1045,8 +1387,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_PROPERTY:
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ if (expr->is_compound()) {
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rdx, Operand(rsp, 0));
+ __ push(rax);
+ } else {
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ }
break;
}
@@ -1091,6 +1440,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op(),
context_);
break;
case NAMED_PROPERTY:
@@ -1132,61 +1482,130 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
}
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ // Invalid left-hand sides are rewritten to have a 'throw
+ // ReferenceError' on the left-hand side.
+ if (!expr->IsValidLeftHandSide()) {
+ VisitForEffect(expr);
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(rax); // Preserve value.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ movq(rdx, rax);
+ __ pop(rax); // Restore value.
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(rax); // Preserve value.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
+ __ Drop(3); // Receiver, key, and extra copy of value.
+ break;
+ }
+ }
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op,
Expression::Context context) {
- // Three main cases: non-this global variables, lookup slots, and
- // all other types of slots. Left-hand-side parameters that rewrite
- // to explicit property accesses do not reach here.
+ // Left-hand sides that rewrite to explicit property accesses do not reach
+ // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
- Slot* slot = var->slot();
+
if (var->is_global()) {
ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
- // rcx, and the global object in rdx.
+ // rcx, and the global object on the stack.
__ Move(rcx, var->name());
__ movq(rdx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, rax);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- __ push(result_register()); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- Apply(context, rax);
+ __ nop();
- } else if (var->slot() != NULL) {
+ } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+ // Perform the assignment for non-const variables and for initialization
+ // of const variables. Const assignments are simply skipped.
+ Label done;
+ Slot* slot = var->slot();
switch (slot->type()) {
- case Slot::LOCAL:
case Slot::PARAMETER:
- __ movq(Operand(rbp, SlotOffset(slot)), result_register());
+ case Slot::LOCAL:
+ if (op == Token::INIT_CONST) {
+ // Detect const reinitialization by checking for the hole value.
+ __ movq(rdx, Operand(rbp, SlotOffset(slot)));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done);
+ }
+ // Perform the assignment.
+ __ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, rcx);
- __ movq(target, result_register());
-
- // RecordWrite may destroy all its register arguments.
- __ movq(rdx, result_register());
+ if (op == Token::INIT_CONST) {
+ // Detect const reinitialization by checking for the hole value.
+ __ movq(rdx, target);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done);
+ }
+ // Perform the assignment and issue the write barrier.
+ __ movq(target, rax);
+ // The value of the assignment is in rax. RecordWrite clobbers its
+ // register arguments.
+ __ movq(rdx, rax);
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ RecordWrite(rcx, offset, rdx, rbx);
break;
}
case Slot::LOOKUP:
- UNREACHABLE();
+ // Call the runtime for the assignment. The runtime will ignore
+ // const reinitialization.
+ __ push(rax); // Value.
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ if (op == Token::INIT_CONST) {
+ // The runtime will ignore const redeclaration.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
break;
}
- Apply(context, result_register());
-
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
+ __ bind(&done);
}
+
+ Apply(context, rax);
}
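The hole checks above implement const initialization semantics: a const slot starts out holding the hole, INIT_CONST writes only while the hole is still there, and plain assignments to const are skipped. A hedged sketch of that rule with stand-in types (helper and sentinel names are illustrative):

#include <stdint.h>

enum SketchOp { kAssign, kInitConst };

// Mirror of the slot-store rule: const slots accept only their first
// initialization; non-const slots always take the new value.
static void StoreToSlotSketch(intptr_t* slot, SketchOp op, intptr_t value,
                              intptr_t the_hole_sentinel, bool is_const) {
  if (is_const && op != kInitConst) return;                    // Skip const re-assignment.
  if (op == kInitConst && *slot != the_hole_sentinel) return;  // Already initialized.
  *slot = value;
}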
@@ -1245,6 +1664,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(result_register());
}
+ __ pop(rcx);
+ if (expr->ends_initialization_block()) {
+ __ movq(rdx, Operand(rsp, 0)); // Leave receiver on the stack for later.
+ } else {
+ __ pop(rdx);
+ }
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
@@ -1255,15 +1680,14 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
+ __ pop(rdx);
__ push(rax); // Result of assignment, saved even if not needed.
- // Receiver is under the key and value.
- __ push(Operand(rsp, 2 * kPointerSize));
+ __ push(rdx);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(rax);
}
- // Receiver and key are still on stack.
- DropAndApply(2, context_, rax);
+ Apply(context_, rax);
}
@@ -1319,7 +1743,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1334,8 +1759,47 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Variable* var = fun->AsVariableProxy()->AsVariable();
if (var != NULL && var->is_possibly_eval()) {
- // Call to the identifier 'eval'.
- UNREACHABLE();
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ VisitForValue(fun, kStack);
+ __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
+
+ // Push the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ // Push copy of the function - found below the arguments.
+ __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ push(Operand(rsp, arg_count * kPointerSize));
+ } else {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+ // The runtime call returns a pair of values in rax (function) and
+ // rdx (receiver). Touch up the stack with the right values.
+ __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ DropAndApply(1, context_, rax);
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Call to a global variable.
// Push global object as receiver for the call IC lookup.
@@ -1343,8 +1807,15 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot.
- UNREACHABLE();
+ // Call to a lookup slot (dynamically introduced variable). Call
+ // the runtime to find the function to call (returned in rax) and
+ // the object holding it (returned in rdx).
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ push(rax); // Function.
+ __ push(rdx); // Receiver.
+ EmitCallWithStub(expr);
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
@@ -1435,7 +1906,711 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+ if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+ EmitIsSmi(expr->arguments());
+ } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+ EmitIsNonNegativeSmi(expr->arguments());
+ } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+ EmitIsObject(expr->arguments());
+ } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+ EmitIsUndetectableObject(expr->arguments());
+ } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+ EmitIsFunction(expr->arguments());
+ } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+ EmitIsArray(expr->arguments());
+ } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+ EmitIsRegExp(expr->arguments());
+ } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+ EmitIsConstructCall(expr->arguments());
+ } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+ EmitObjectEquals(expr->arguments());
+ } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+ EmitArguments(expr->arguments());
+ } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+ EmitArgumentsLength(expr->arguments());
+ } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+ EmitClassOf(expr->arguments());
+ } else if (strcmp("_Log", *name->ToCString()) == 0) {
+ EmitLog(expr->arguments());
+ } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+ EmitRandomHeapNumber(expr->arguments());
+ } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+ EmitSubString(expr->arguments());
+ } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+ EmitRegExpExec(expr->arguments());
+ } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+ EmitValueOf(expr->arguments());
+ } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+ EmitSetValueOf(expr->arguments());
+ } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+ EmitNumberToString(expr->arguments());
+ } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+ EmitCharFromCode(expr->arguments());
+ } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+ EmitFastCharCodeAt(expr->arguments());
+ } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+ EmitStringAdd(expr->arguments());
+ } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+ EmitStringCompare(expr->arguments());
+ } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+ EmitMathPow(expr->arguments());
+ } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+ EmitMathSin(expr->arguments());
+ } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+ EmitMathCos(expr->arguments());
+ } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+ EmitMathSqrt(expr->arguments());
+ } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+ EmitCallFunction(expr->arguments());
+ } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+ EmitRegExpConstructResult(expr->arguments());
+ } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+ EmitSwapElements(expr->arguments());
+ } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+ EmitGetFromCache(expr->arguments());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
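The JumpIfSmi test used throughout these helpers comes down to checking the low tag bit; a one-line sketch, assuming kSmiTag == 0 and kSmiTagSize == 1 as in V8's tagging scheme:

#include <stdint.h>

// A tagged word is a smi exactly when its lowest bit (the tag) is clear.
static inline bool IsSmiSketch(intptr_t tagged_word) {
  return (tagged_word & 1) == 0;
}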
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ Condition positive_smi = __ CheckPositiveSmi(rax);
+ __ j(positive_smi, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, if_true);
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, if_false);
+ __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+ __ j(below_equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Get the frame pointer for the calling frame.
+ __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ SmiCompare(Operand(rax, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ pop(rbx);
+ __ cmpq(rax, rbx);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
+ VisitForValue(args->at(0), kAccumulator);
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label exit;
+ // Get the number of formal parameters.
+ __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ if (FLAG_debug_code) __ AbortIfNotSmi(rax);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(rax, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &null);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &function);
+
+ // Check if the constructor in the map is a function.
+ __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &non_function_constructor);
+
+ // rax now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ jmp(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ Move(rax, Factory::function_class_symbol());
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ Move(rax, Factory::Object_symbol());
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(rax, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ Apply(context_, rax);
+}
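In outline, the classification above maps a value to its class name as follows; a small sketch with stand-in types (the enum and helper are hypothetical, the real code reads the map and SharedFunctionInfo fields instead):

#include <stddef.h>

// Smis and non-JS objects get class null, functions get "Function", objects
// whose map's constructor is not a function get "Object", and everything else
// reports the constructor's instance class name.
enum SketchKind { kSmi, kNonJSObject, kJSFunction, kJSObject };

static const char* ClassOfSketch(SketchKind kind,
                                 bool constructor_is_function,
                                 const char* instance_class_name) {
  if (kind == kSmi || kind == kNonJSObject) return NULL;
  if (kind == kJSFunction) return "Function";
  if (!constructor_is_function) return "Object";
  return instance_class_name;  // e.g. "Array", "Date", "RegExp".
}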
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label slow_allocate_heapnumber;
+ Label heapnumber_allocated;
+
+ __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ // To allocate a heap number, and ensure that it is not a smi, we
+ // call the runtime function NumberUnaryMinus on 0, returning the double
+ // -0.0. A new, distinct heap number is returned each time.
+ __ Push(Smi::FromInt(0));
+ __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ __ movq(rbx, rax);
+
+ __ bind(&heapnumber_allocated);
+
+ // Return a random uint32 number in rax.
+ // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+ // Convert 32 random bits in rax to 0.(32 random bits) in a double
+ // by computing:
+ // ( 1.(20 0s)(32 random bits) x 2^20 ) - ( 1.0 x 2^20 ).
+ __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm1, rcx);
+ __ movd(xmm0, rax);
+ __ cvtss2sd(xmm1, xmm1);
+ __ xorpd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
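+ // xmm0 now holds (32 random bits) * 2^-32, a double in the range [0, 1).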
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+
+ __ movq(rax, rbx);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ASSERT(args->length() == 3);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ASSERT(args->length() == 4);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ VisitForValue(args->at(3), kStack);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(rax, &done);
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
+ __ j(not_equal, &done);
+ __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
+
+ __ bind(&done);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the runtime function.
+ ASSERT(args->length() == 2);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack); // Load the object.
+ VisitForValue(args->at(1), kAccumulator); // Load the value.
+ __ pop(rbx); // rax = value. rbx = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(rbx, &done);
+
+ // If the object is not a value type, return the value.
+ __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
+ __ j(not_equal, &done);
+
+ // Store the value.
+ __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ movq(rdx, rax);
+ __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+
+ __ bind(&done);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and call the stub.
+ VisitForValue(args->at(0), kStack);
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label slow_case, done;
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ __ JumpIfNotSmi(rax, &slow_case);
+ __ SmiToInteger32(rcx, rax);
+ __ cmpl(rcx, Immediate(String::kMaxAsciiCharCode));
+ __ j(above, &slow_case);
+
+ __ Move(rbx, Factory::single_character_string_cache());
+ __ movq(rbx, FieldOperand(rbx,
+ rcx,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+ __ movq(rax, rbx);
+ __ jmp(&done);
+
+ __ bind(&slow_case);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+
+ __ bind(&done);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+ // TODO(fsc): Port the complete implementation from the classic back-end.
+ // Move the undefined value into the result register, which will
+ // trigger the slow case.
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the runtime function.
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // For receiver and function.
+ VisitForValue(args->at(0), kStack); // Receiver.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i + 1), kStack);
+ }
+ VisitForValue(args->at(arg_count + 1), kAccumulator); // Function.
+
+ // InvokeFunction requires function in rdi. Move it in there.
+ if (!result_register().is(rdi)) __ movq(rdi, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(rdi, count, CALL_FUNCTION);
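+ // Restore the context register from the current frame after the call.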
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kStack);
+ VisitForValue(args->at(2), kStack);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ Top::global_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort("Attempt to use undefined cache.");
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ Apply(context_, rax);
+ return;
+ }
+
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register key = rax;
+ Register cache = rbx;
+ Register tmp = rcx;
+ __ movq(cache, CodeGenerator::ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(cache,
+ FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ movq(cache,
+ CodeGenerator::ContextOperand(
+ cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ movq(cache,
+ FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done, not_found;
+ // Load the cache finger, the offset of the most recently used entry, into tmp as a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ SmiIndex index =
+ __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
+ __ cmpq(key, FieldOperand(cache,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &not_found);
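+ // Cache hit: the value is stored in the element immediately after its key.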
+ __ movq(rax, FieldOperand(cache,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ jmp(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ push(cache);
+ __ push(key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ bind(&done);
+ Apply(context_, rax);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+ if (name->length() > 0 && name->Get(0) == '_') {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1468,6 +2643,46 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* prop = expr->expression()->AsProperty();
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (prop == NULL && var == NULL) {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ Apply(context_, true);
+ } else if (var != NULL &&
+ !var->is_global() &&
+ var->slot() != NULL &&
+ var->slot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ Apply(context_, false);
+ } else {
+ // Property or variable reference. Call the delete builtin with
+ // object and property name as arguments.
+ if (prop != NULL) {
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ } else if (var->is_global()) {
+ __ push(CodeGenerator::GlobalObject());
+ __ Push(var->name());
+ } else {
+ // Non-global variable. Call the runtime to look up the context
+ // where the variable was introduced.
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLookupContext, 2);
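+ // The runtime call returns the context in which the variable was found;
+ // use it as the object argument for the delete builtin.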
+ __ push(rax);
+ __ Push(var->name());
+ }
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ Apply(context_, rax);
+ }
+ break;
+ }
+
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
@@ -1508,33 +2723,15 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false, done;
- // Initially assume a pure test context. Notice that the labels are
- // swapped.
- Label* if_true = false_label_;
- Label* if_false = true_label_;
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &materialize_false;
- if_false = &materialize_true;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_false = &materialize_true;
- break;
- case Expression::kTestValue:
- if_true = &materialize_false;
- break;
- }
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+
+ // Notice that the labels are swapped.
+ PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
VisitForControl(expr->expression(), if_true, if_false);
+
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
@@ -1630,6 +2827,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ // Invalid left-hand-sides are rewritten to have a 'throw
+ // ReferenceError' as the left-hand side.
+ if (!expr->expression()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->expression());
+ return;
+ }
+
// Expression can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -1650,7 +2854,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
- } else {
+ } else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && context_ != Expression::kEffect) {
__ Push(Smi::FromInt(0));
@@ -1735,7 +2939,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
+ // Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN,
Expression::kEffect);
// For all contexts except kEffect: We have the result on
// top of the stack.
@@ -1743,7 +2949,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
ApplyTOS(context_);
}
} else {
+ // Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN,
context_);
}
break;
@@ -1765,18 +2973,19 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
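+ // The keyed store IC expects the value in rax, the key in rcx, and the
+ // receiver in rdx.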
+ __ pop(rcx);
+ __ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
// site for it to patch.
__ nop();
if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
- DropAndApply(2, context_, rax);
+ Apply(context_, rax);
}
break;
}
@@ -1818,36 +3027,39 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
+void FullCodeGenerator::EmitNullCompare(bool strict,
+ Register obj,
+ Register null_const,
+ Label* if_true,
+ Label* if_false,
+ Register scratch) {
+ __ cmpq(obj, null_const);
+ if (strict) {
+ __ j(equal, if_true);
+ } else {
+ __ j(equal, if_true);
+ __ CompareRoot(obj, Heap::kUndefinedValueRootIndex);
+ __ j(equal, if_true);
+ __ JumpIfSmi(obj, if_false);
+ // It can also be an undetectable object (such as document.all), which
+ // compares equal to null.
+ __ movq(scratch, FieldOperand(obj, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_true);
+ }
+ __ jmp(if_false);
+}
+
+
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false, done;
- // Initially assume we are in a test context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &materialize_true;
- if_false = &materialize_false;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_true = &materialize_true;
- break;
- case Expression::kTestValue:
- if_false = &materialize_false;
- break;
- }
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
VisitForValue(expr->left(), kStack);
switch (expr->op()) {
@@ -1877,10 +3089,24 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
strict = true;
// Fall through.
- case Token::EQ:
+ case Token::EQ: {
cc = equal;
__ pop(rdx);
+ // If either operand is constant null we do a fast compare
+ // against null.
+ Literal* right_literal = expr->right()->AsLiteral();
+ Literal* left_literal = expr->left()->AsLiteral();
+ if (right_literal != NULL && right_literal->handle()->IsNull()) {
+ EmitNullCompare(strict, rdx, rax, if_true, if_false, rcx);
+ Apply(context_, if_true, if_false);
+ return;
+ } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+ EmitNullCompare(strict, rax, rdx, if_true, if_false, rcx);
+ Apply(context_, if_true, if_false);
+ return;
+ }
break;
+ }
case Token::LT:
cc = less;
__ pop(rdx);
@@ -1991,3 +3217,5 @@ void FullCodeGenerator::ExitFinallyBlock() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 88fcfd1400..8766ebb145 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "ic-inl.h"
#include "runtime.h"
@@ -778,16 +780,16 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
// -----------------------------------
- __ pop(rcx);
- __ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(Operand(rsp, 1 * kPointerSize)); // key
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
__ push(rax); // value
- __ push(rcx); // return address
+ __ push(rbx); // return address
// Do tail-call to runtime routine.
ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
@@ -798,16 +800,16 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
// -----------------------------------
- __ pop(rcx);
- __ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(Operand(rsp, 1 * kPointerSize)); // key
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
__ push(rax); // value
- __ push(rcx); // return address
+ __ push(rbx); // return address
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
@@ -816,50 +818,46 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, fast, array, extra, check_pixel_array;
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
- // Get the key from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
- __ JumpIfNotSmi(rbx, &slow);
+ __ JumpIfNotSmi(rcx, &slow);
- __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+ __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JS object.
- __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow);
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
- // rbx: index (as a smi)
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ // rcx: index (as a smi)
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
- __ SmiToInteger32(rdx, rbx);
- __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
+ __ SmiToInteger32(rdi, rcx);
+ __ cmpl(rdi, FieldOperand(rbx, Array::kLengthOffset));
// rax: value
- // rcx: FixedArray
- // rbx: index (as a smi)
+ // rbx: FixedArray
+ // rcx: index (as a smi)
__ j(below, &fast);
// Slow case: call runtime.
@@ -868,31 +866,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// rax: value
- // rcx: elements array
- // rbx: index (as a smi), zero-extended.
+ // rdx: receiver
+ // rbx: receiver's elements array
+ // rcx: index (as a smi), zero-extended.
__ bind(&check_pixel_array);
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ JumpIfNotSmi(rax, &slow);
- __ SmiToInteger32(rbx, rbx);
- __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ SmiToInteger32(rdi, rcx);
+ __ cmpl(rdi, FieldOperand(rbx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
- __ movq(rdx, rax); // Save the value.
- __ SmiToInteger32(rax, rax);
+ // No more bailouts to slow case on this path, so key not needed.
+ __ SmiToInteger32(rcx, rax);
{ // Clamp the value to [0..255].
Label done;
- __ testl(rax, Immediate(0xFFFFFF00));
+ __ testl(rcx, Immediate(0xFFFFFF00));
__ j(zero, &done);
- __ setcc(negative, rax); // 1 if negative, 0 if positive.
- __ decb(rax); // 0 if negative, 255 if positive.
+ __ setcc(negative, rcx); // 1 if negative, 0 if positive.
+ __ decb(rcx); // 0 if negative, 255 if positive.
__ bind(&done);
}
- __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movb(Operand(rcx, rbx, times_1, 0), rax);
- __ movq(rax, rdx); // Return the original value.
+ __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(rbx, rdi, times_1, 0), rcx);
__ ret(0);
// Extra capacity case: Check if there is extra capacity to
@@ -900,18 +898,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// element to the array by writing to array[array.length].
__ bind(&extra);
// rax: value
- // rdx: JSArray
- // rcx: FixedArray
- // rbx: index (as a smi)
+ // rdx: receiver (a JSArray)
+ // rbx: receiver's elements array (a FixedArray)
+ // rcx: index (as a smi)
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
- __ SmiToInteger64(rbx, rbx);
- __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiToInteger64(rdi, rcx);
+ __ cmpl(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Increment and restore smi-tag.
- __ Integer64PlusConstantToSmi(rbx, rbx, 1);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
- __ SmiSubConstant(rbx, rbx, Smi::FromInt(1));
+ __ Integer64PlusConstantToSmi(rdi, rdi, 1);
+ __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
@@ -919,39 +916,39 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// length is always a smi.
__ bind(&array);
// rax: value
- // rdx: JSArray
- // rbx: index (as a smi)
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ // rdx: receiver (a JSArray)
+ // rcx: index (as a smi)
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
- __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+ __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
__ j(below_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
// rax: value
- // rcx: FixedArray
- // rbx: index (as a smi)
+ // rbx: receiver's elements array (a FixedArray)
+ // rcx: index (as a smi)
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ movq(Operand(rcx, index.reg, index.scale,
+ SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+ __ movq(Operand(rbx, index.reg, index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
rax);
__ ret(0);
__ bind(&non_smi_value);
- // Slow case that needs to retain rbx for use by RecordWrite.
+ // Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
- SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rbx, kPointerSizeLog2);
- __ movq(Operand(rcx, index2.reg, index2.scale,
+ SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
+ __ movq(Operand(rbx, index2.reg, index2.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
rax);
__ movq(rdx, rax);
- __ RecordWriteNonSmi(rcx, 0, rdx, rbx);
+ __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
__ ret(0);
}
@@ -959,102 +956,103 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, check_heap_number;
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
- // Get the key from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
- __ JumpIfNotSmi(rbx, &slow);
+ __ JumpIfNotSmi(rcx, &slow);
// Check that the object is a JS object.
- __ CmpInstanceType(rcx, JS_OBJECT_TYPE);
+ __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: value
- // rdx: JSObject
- // rbx: index (as a smi)
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
- __ SmiToInteger32(rbx, rbx); // Untag the index.
- __ cmpl(rbx, FieldOperand(rcx, ExternalArray::kLengthOffset));
+ __ SmiToInteger32(rdi, rcx); // Untag the index.
+ __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// rax: value
- // rcx: elements array
- // rbx: untagged index
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
__ JumpIfNotSmi(rax, &check_heap_number);
- __ movq(rdx, rax); // Save the value.
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
- // rcx: base pointer of external storage
+ // No more branches to slow case on this path. Key and receiver not needed.
+ __ SmiToInteger32(rdx, rax);
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ movb(Operand(rcx, rbx, times_1, 0), rax);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ movw(Operand(rcx, rbx, times_2, 0), rax);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ movl(Operand(rcx, rbx, times_4, 0), rax);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
- __ push(rax);
+ __ push(rdx);
__ fild_s(Operand(rsp, 0));
- __ pop(rax);
- __ fstp_s(Operand(rcx, rbx, times_4, 0));
+ __ pop(rdx);
+ __ fstp_s(Operand(rbx, rdi, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
- __ movq(rax, rdx); // Return the original value.
__ ret(0);
__ bind(&check_heap_number);
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rdx);
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
__ j(not_equal, &slow);
+ // No more branches to slow case on this path.
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rax); // Save the value.
- __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
- // rbx: untagged index
- // rcx: base pointer of external storage
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rdi: untagged index
+ // rbx: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
- __ fstp_s(Operand(rcx, rbx, times_4, 0));
- __ movq(rax, rdx); // Return the original value.
+ __ fstp_s(Operand(rbx, rdi, times_4, 0));
__ ret(0);
} else {
// Need to perform float-to-int conversion.
@@ -1063,66 +1061,70 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ fucomi(0);
__ j(parity_even, &is_nan);
- __ push(rax); // Make room on stack
+ __ push(rdx); // Make room on the stack. Receiver is no longer needed.
__ fistp_d(Operand(rsp, 0));
- __ pop(rax);
- // rax: untagged integer value
+ __ pop(rdx);
+ // rdx: value (converted to an untagged integer)
+ // rdi: untagged index
+ // rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ movb(Operand(rcx, rbx, times_1, 0), rax);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ movw(Operand(rcx, rbx, times_2, 0), rax);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// We also need to explicitly check for +/-Infinity. These are
// converted to MIN_INT, but we need to be careful not to
- // confuse with legal uses of MIN_INT.
+ // confuse it with legal uses of MIN_INT. Since MIN_INT truncated
+ // to 8 or 16 bits is zero, we only perform this test when storing
+ // 32-bit ints.
Label not_infinity;
// This test would apparently detect both NaN and Infinity,
// but we've already checked for NaN using the FPU hardware
// above.
- __ movzxwq(rdi, FieldOperand(rdx, HeapNumber::kValueOffset + 6));
- __ and_(rdi, Immediate(0x7FF0));
- __ cmpw(rdi, Immediate(0x7FF0));
+ __ movzxwq(rcx, FieldOperand(rax, HeapNumber::kValueOffset + 6));
+ __ and_(rcx, Immediate(0x7FF0));
+ __ cmpw(rcx, Immediate(0x7FF0));
__ j(not_equal, &not_infinity);
- __ movq(rax, Immediate(0));
+ __ movq(rdx, Immediate(0));
__ bind(&not_infinity);
- __ movl(Operand(rcx, rbx, times_4, 0), rax);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
}
default:
UNREACHABLE();
break;
}
- __ movq(rax, rdx); // Return the original value.
__ ret(0);
__ bind(&is_nan);
+ // rdi: untagged index
+ // rbx: base pointer of external storage
__ ffree();
__ fincstp();
- __ movq(rax, Immediate(0));
+ __ movq(rdx, Immediate(0));
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ movb(Operand(rcx, rbx, times_1, 0), rax);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ movw(Operand(rcx, rbx, times_2, 0), rax);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ movl(Operand(rcx, rbx, times_4, 0), rax);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
default:
UNREACHABLE();
break;
}
- __ movq(rax, rdx); // Return the original value.
__ ret(0);
}
@@ -1619,3 +1621,5 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/jump-target-x64.cc b/deps/v8/src/x64/jump-target-x64.cc
index 9b08c1f892..1208b0dbea 100644
--- a/deps/v8/src/x64/jump-target-x64.cc
+++ b/deps/v8/src/x64/jump-target-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
@@ -431,3 +433,5 @@ void BreakTarget::Bind(Result* arg) {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index f9b444b73b..065b61693f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "assembler-x64.h"
@@ -800,7 +802,7 @@ void MacroAssembler::SmiSub(Register dst,
void MacroAssembler::SmiSub(Register dst,
Register src1,
- Operand const& src2,
+ const Operand& src2,
Label* on_not_smi_result) {
if (on_not_smi_result == NULL) {
// No overflow checking. Use only when it's known that
@@ -918,6 +920,14 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
}
+void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
+ if (constant->value() != 0) {
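+ // A tagged smi does not fit in a 32-bit immediate on x64, so materialize
+ // it in the scratch register first.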
+ Move(kScratchRegister, constant);
+ addq(dst, kScratchRegister);
+ }
+}
+
+
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
Smi* constant,
@@ -1730,23 +1740,21 @@ void MacroAssembler::CheckMap(Register obj,
}
-void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+void MacroAssembler::AbortIfNotNumber(Register object) {
Label ok;
Condition is_smi = CheckSmi(object);
j(is_smi, &ok);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
Factory::heap_number_map());
- Assert(equal, msg);
+ Assert(equal, "Operand not a number");
bind(&ok);
}
-void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
+void MacroAssembler::AbortIfNotSmi(Register object) {
Label ok;
Condition is_smi = CheckSmi(object);
- j(is_smi, &ok);
- Assert(equal, msg);
- bind(&ok);
+ Assert(is_smi, "Operand not a smi");
}
@@ -2766,3 +2774,5 @@ CodePatcher::~CodePatcher() {
}
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 2eeb1fd254..b4f3240ec8 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -306,6 +306,10 @@ class MacroAssembler: public Assembler {
// No overflow testing on the result is done.
void SmiAddConstant(Register dst, Register src, Smi* constant);
+ // Add an integer constant to a tagged smi, giving a tagged smi as result.
+ // No overflow testing on the result is done.
+ void SmiAddConstant(const Operand& dst, Smi* constant);
+
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
void SmiAddConstant(Register dst,
@@ -349,7 +353,7 @@ class MacroAssembler: public Assembler {
void SmiSub(Register dst,
Register src1,
- Operand const& src2,
+ const Operand& src2,
Label* on_not_smi_result);
// Multiplies smi values and return the result as a smi,
@@ -533,10 +537,10 @@ class MacroAssembler: public Assembler {
void FCmp();
// Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object, const char* msg);
+ void AbortIfNotNumber(Register object);
// Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object, const char* msg);
+ void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 50b4120a5d..383399ea60 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
#include "serialize.h"
#include "unicode.h"
#include "log.h"
@@ -188,8 +191,8 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kAtStart), Immediate(0));
- BranchOrBacktrack(equal, &not_at_start);
+ __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
__ cmpq(rax, Operand(rbp, kInputStart));
@@ -200,8 +203,8 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kAtStart), Immediate(0));
- BranchOrBacktrack(equal, on_not_at_start);
+ __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
__ cmpq(rax, Operand(rbp, kInputStart));
@@ -219,6 +222,15 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
+#ifdef DEBUG
+ // If input is ASCII, don't even bother calling here if the string to
+ // match contains a non-ascii character.
+ if (mode_ == ASCII) {
+ for (int i = 0; i < str.length(); i++) {
+ ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
+ }
+ }
+#endif
int byte_length = str.length() * char_size();
int byte_offset = cp_offset * char_size();
if (check_end_of_string) {
@@ -232,16 +244,71 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
on_failure = &backtrack_label_;
}
- // TODO(lrn): Test multiple characters at a time by loading 4 or 8 bytes
- // at a time.
- for (int i = 0; i < str.length(); i++) {
+ // Do one character test first to minimize loading for the case that
+ // we don't match at all (loading more than one character introduces the
+ // chance of reading unaligned and reading across cache boundaries).
+ // If the first character matches, expect a larger chance of matching the
+ // string, and start loading more characters at a time.
+ if (mode_ == ASCII) {
+ __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
+ Immediate(static_cast<int8_t>(str[0])));
+ } else {
+ // Don't use 16-bit immediate. The size changing prefix throws off
+ // pre-decoding.
+ __ movzxwl(rax,
+ Operand(rsi, rdi, times_1, byte_offset));
+ __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
+ }
+ BranchOrBacktrack(not_equal, on_failure);
+
+ __ lea(rbx, Operand(rsi, rdi, times_1, 0));
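+ // rbx now holds the address of the current input position, so the loop
+ // below can address characters at constant offsets from it.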
+ for (int i = 1, n = str.length(); i < n; ) {
if (mode_ == ASCII) {
- __ cmpb(Operand(rsi, rdi, times_1, byte_offset + i),
- Immediate(static_cast<int8_t>(str[i])));
+ if (i + 8 <= n) {
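+ // Pack eight ASCII pattern characters into one quadword, low byte first,
+ // matching their little-endian layout in the input.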
+ uint64_t combined_chars =
+ (static_cast<uint64_t>(str[i + 0]) << 0) |
+ (static_cast<uint64_t>(str[i + 1]) << 8) |
+ (static_cast<uint64_t>(str[i + 2]) << 16) |
+ (static_cast<uint64_t>(str[i + 3]) << 24) |
+ (static_cast<uint64_t>(str[i + 4]) << 32) |
+ (static_cast<uint64_t>(str[i + 5]) << 40) |
+ (static_cast<uint64_t>(str[i + 6]) << 48) |
+ (static_cast<uint64_t>(str[i + 7]) << 56);
+ __ movq(rax, combined_chars, RelocInfo::NONE);
+ __ cmpq(rax, Operand(rbx, byte_offset + i));
+ i += 8;
+ } else if (i + 4 <= n) {
+ uint32_t combined_chars =
+ (static_cast<uint32_t>(str[i + 0]) << 0) |
+ (static_cast<uint32_t>(str[i + 1]) << 8) |
+ (static_cast<uint32_t>(str[i + 2]) << 16) |
+ (static_cast<uint32_t>(str[i + 3]) << 24);
+ __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
+ i += 4;
+ } else {
+ __ cmpb(Operand(rbx, byte_offset + i),
+ Immediate(static_cast<int8_t>(str[i])));
+ i++;
+ }
} else {
ASSERT(mode_ == UC16);
- __ cmpw(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(str[i]));
+ if (i + 4 <= n) {
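+ // Four UC16 pattern characters are read directly; their in-memory byte
+ // order already matches the input.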
+ uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
+ __ movq(rax, combined_chars, RelocInfo::NONE);
+ __ cmpq(rax,
+ Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
+ i += 4;
+ } else if (i + 2 <= n) {
+ uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
+ __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
+ Immediate(combined_chars));
+ i += 2;
+ } else {
+ __ movzxwl(rax,
+ Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
+ __ cmpl(rax, Immediate(str[i]));
+ i++;
+ }
}
BranchOrBacktrack(not_equal, on_failure);
}
@@ -671,7 +738,6 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
__ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
@@ -724,14 +790,6 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ xor_(rcx, rcx); // setcc only operates on cl (lower byte of rcx).
- __ testq(rbx, rbx);
- __ setcc(zero, rcx); // 1 if 0 (start of string), 0 if positive.
- __ movq(Operand(rbp, kAtStart), rcx);
-
if (num_saved_registers_ > 0) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
@@ -761,8 +819,8 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Move(code_object_pointer(), masm_->CodeObject());
// Load previous char as initial value of current-character.
Label at_start;
- __ cmpb(Operand(rbp, kAtStart), Immediate(0));
- __ j(not_equal, &at_start);
+ __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ __ j(equal, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
@@ -1313,3 +1371,5 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 490326971f..3bcc3ac165 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -173,10 +173,9 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// the frame in GetCode.
static const int kInputStartMinusOne =
kLastCalleeSaveRegister - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/x64/register-allocator-x64.cc b/deps/v8/src/x64/register-allocator-x64.cc
index cf295935b9..1f5467e130 100644
--- a/deps/v8/src/x64/register-allocator-x64.cc
+++ b/deps/v8/src/x64/register-allocator-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"
@@ -85,3 +87,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 384bca1892..25361b367f 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -28,6 +28,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
@@ -2027,23 +2029,18 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_store_field, 1);
- // Get the name from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
// Check that the name has not changed.
__ Cmp(rcx, Handle<String>(name));
__ j(not_equal, &miss);
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
// Generate store field code. Preserves receiver and name on jump to miss.
GenerateStoreField(masm(),
object,
@@ -2367,3 +2364,5 @@ Object* ConstructStubCompiler::CompileConstructStub(
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index 1e4374b079..db316bba40 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@@ -1029,6 +1031,46 @@ void VirtualFrame::DebugBreak() {
#endif
+// This function assumes that the only results that could be in a_reg or b_reg
+// are a and b. Other results can be live, but must not be in a_reg or b_reg.
+void VirtualFrame::MoveResultsToRegisters(Result* a,
+ Result* b,
+ Register a_reg,
+ Register b_reg) {
+ ASSERT(!a_reg.is(b_reg));
+ // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
+ ASSERT(cgen()->allocator()->count(a_reg) <= 2);
+ ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
+ ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
+ ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
+ (a->is_register() && a->reg().is(a_reg)) ||
+ (b->is_register() && b->reg().is(a_reg)));
+ // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
+ ASSERT(cgen()->allocator()->count(b_reg) <= 2);
+ ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
+ ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
+ ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
+ (a->is_register() && a->reg().is(b_reg)) ||
+ (b->is_register() && b->reg().is(b_reg)));
+
+ if (a->is_register() && a->reg().is(a_reg)) {
+ b->ToRegister(b_reg);
+ } else if (!cgen()->allocator()->is_used(a_reg)) {
+ a->ToRegister(a_reg);
+ b->ToRegister(b_reg);
+ } else if (cgen()->allocator()->is_used(b_reg)) {
+ // a must be in b_reg, b in a_reg.
+ __ xchg(a_reg, b_reg);
+ // Results a and b will be invalidated, so it is ok if they are switched.
+ } else {
+ b->ToRegister(b_reg);
+ a->ToRegister(a_reg);
+ }
+ a->Unuse();
+ b->Unuse();
+}
+
+
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. The IC expects
// name in rcx and receiver on the stack. It does not drop the
@@ -1051,15 +1093,52 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
}
-Result VirtualFrame::CallKeyedStoreIC() {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in rax and key and receiver on the stack. It does
- // not drop the key and receiver.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- Result value = Pop();
- PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
- value.ToRegister(rax);
- value.Unuse();
+Result VirtualFrame::CallCommonStoreIC(Handle<Code> ic,
+ Result* value,
+ Result* key,
+ Result* receiver) {
+ // The IC expects value in rax, key in rcx, and receiver in rdx.
+ PrepareForCall(0, 0);
+ // If one of the three registers is free, or a value is already
+ // in the correct register, move the remaining two values using
+ // MoveResultsToRegisters().
+ if (!cgen()->allocator()->is_used(rax) ||
+ (value->is_register() && value->reg().is(rax))) {
+ if (!cgen()->allocator()->is_used(rax)) {
+ value->ToRegister(rax);
+ }
+ MoveResultsToRegisters(key, receiver, rcx, rdx);
+ value->Unuse();
+ } else if (!cgen()->allocator()->is_used(rcx) ||
+ (key->is_register() && key->reg().is(rcx))) {
+ if (!cgen()->allocator()->is_used(rcx)) {
+ key->ToRegister(rcx);
+ }
+ MoveResultsToRegisters(value, receiver, rax, rdx);
+ key->Unuse();
+ } else if (!cgen()->allocator()->is_used(rdx) ||
+ (receiver->is_register() && receiver->reg().is(rdx))) {
+ if (!cgen()->allocator()->is_used(rdx)) {
+ receiver->ToRegister(rdx);
+ }
+ MoveResultsToRegisters(key, value, rcx, rax);
+ receiver->Unuse();
+ } else {
+ // Otherwise, no register is free, and no value is in the correct place.
+ // We have one of the two circular permutations of rax, rcx, rdx.
+ ASSERT(value->is_register());
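+ // If value is in rcx then key is in rdx and receiver in rax; otherwise
+ // value is in rdx, key in rax, and receiver in rcx. Two exchanges rotate
+ // all three into place.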
+ if (value->reg().is(rcx)) {
+ __ xchg(rax, rdx);
+ __ xchg(rax, rcx);
+ } else {
+ __ xchg(rax, rcx);
+ __ xchg(rax, rdx);
+ }
+ value->Unuse();
+ key->Unuse();
+ receiver->Unuse();
+ }
+
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
@@ -1106,51 +1185,6 @@ Result VirtualFrame::CallConstructor(int arg_count) {
}
-Result VirtualFrame::CallStoreIC() {
- // Name, value, and receiver are on top of the frame. The IC
- // expects name in rcx, value in rax, and receiver in edx.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- Result name = Pop();
- Result value = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
-
- // Optimized for case in which name is a constant value.
- if (name.is_register() && (name.reg().is(rdx) || name.reg().is(rax))) {
- if (!is_used(rcx)) {
- name.ToRegister(rcx);
- } else if (!is_used(rbx)) {
- name.ToRegister(rbx);
- } else {
- ASSERT(!is_used(rdi)); // Only three results are live, so rdi is free.
- name.ToRegister(rdi);
- }
- }
- // Now name is not in edx or eax, so we can fix them, then move name to ecx.
- if (value.is_register() && value.reg().is(rdx)) {
- if (receiver.is_register() && receiver.reg().is(rax)) {
- // Wrong registers.
- __ xchg(rax, rdx);
- } else {
- // Register rax is free for value, which frees rcx for receiver.
- value.ToRegister(rax);
- receiver.ToRegister(rdx);
- }
- } else {
- // Register rcx is free for receiver, which guarantees rax is free for
- // value.
- receiver.ToRegister(rdx);
- value.ToRegister(rax);
- }
- // Receiver and value are in the right place, so rcx is free for name.
- name.ToRegister(rcx);
- name.Unuse();
- value.Unuse();
- receiver.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
void VirtualFrame::PushTryHandler(HandlerType type) {
ASSERT(cgen()->HasValidEntryRegisters());
// Grow the expression stack by handler size less one (the return
@@ -1163,3 +1197,5 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 7cda181865..1c9751bb12 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -31,6 +31,7 @@
#include "type-info.h"
#include "register-allocator.h"
#include "scopes.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
@@ -98,23 +99,16 @@ class VirtualFrame : public ZoneObject {
return register_locations_[num];
}
- int register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
- }
+ inline int register_location(Register reg);
- void set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
- }
+ inline void set_register_location(Register reg, int index);
bool is_used(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num] != kIllegalIndex;
}
- bool is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
- }
+ inline bool is_used(Register reg);
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
@@ -151,6 +145,9 @@ class VirtualFrame : public ZoneObject {
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Spill the top element of the frame to memory.
+ void SpillTop() { SpillElementAt(element_count() - 1); }
+
// Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
@@ -218,10 +215,7 @@ class VirtualFrame : public ZoneObject {
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
- void SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
- }
+ inline void SetElementAt(int index, Handle<Object> value);
void PushElementAt(int index) {
PushFrameSlotAt(element_count() - index - 1);
@@ -302,10 +296,7 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
- }
+ inline Result CallStub(CodeStub* stub, int arg_count);
// Call stub that takes a single argument passed in eax. The
// argument is given as a result which does not have to be eax or
@@ -345,13 +336,33 @@ class VirtualFrame : public ZoneObject {
// frame. They are not dropped.
Result CallKeyedLoadIC(RelocInfo::Mode mode);
- // Call store IC. Name, value, and receiver are found on top of the
- // frame. Receiver is not dropped.
- Result CallStoreIC();
+
+ // Calling a store IC and a keyed store IC differ only by which ic is called
+ // and by the order of the three arguments on the frame.
+ Result CallCommonStoreIC(Handle<Code> ic,
+ Result* value,
+ Result *key,
+ Result* receiver);
+
+ // Call store IC. Name, value, and receiver are found on top
+ // of the frame. All are dropped.
+ Result CallStoreIC() {
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Result name = Pop();
+ Result value = Pop();
+ Result receiver = Pop();
+ return CallCommonStoreIC(ic, &value, &name, &receiver);
+ }
// Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. Key and receiver are not dropped.
- Result CallKeyedStoreIC();
+ // of the frame. All are dropped.
+ Result CallKeyedStoreIC() {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Result value = Pop();
+ Result key = Pop();
+ Result receiver = Pop();
+ return CallCommonStoreIC(ic, &value, &key, &receiver);
+ }
// Call call IC. Function name, arguments, and receiver are found on top
// of the frame and dropped by the call.
@@ -446,8 +457,8 @@ class VirtualFrame : public ZoneObject {
int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
+ inline int parameter_count();
+ inline int local_count();
// The index of the element that is at the processor's frame pointer
// (the ebp register). The parameters, receiver, and return address
@@ -560,6 +571,14 @@ class VirtualFrame : public ZoneObject {
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
+ // This function assumes that a and b are the only results that could be in
+ // the registers a_reg or b_reg. Other results can be live, but must not
+ // be in the registers a_reg or b_reg. The results a and b are invalidated.
+ void MoveResultsToRegisters(Result* a,
+ Result* b,
+ Register a_reg,
+ Register b_reg);
+
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
Result RawCallStub(CodeStub* stub);
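Note: CallStoreIC and CallKeyedStoreIC above now funnel into the shared CallCommonStoreIC, differing only in which builtin is invoked and whether the property name is a constant or a key popped from the frame. For illustration only (not part of the patch), these are the two source-level store forms that end up on those paths:

function namedStore(o, v) {
  o.x = v;       // named store: constant property name, handled by the store IC
}

function keyedStore(o, k, v) {
  o[k] = v;      // keyed store: key computed at run time, handled by the keyed store IC
}

var obj = {};
namedStore(obj, 1);
keyedStore(obj, 'y', 2);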
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 6133cdb2ff..f587fc8a17 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -114,7 +114,8 @@ TEST(CodeEvents) {
processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeDeleteEvent(ToAddress(0x1600));
- processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000));
+ processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000),
+ CodeEntry::kNoSecurityToken);
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
@@ -176,7 +177,8 @@ TEST(TickEvents) {
processor.Stop();
processor.Join();
- CpuProfile* profile = profiles.StopProfiling("", 1);
+ CpuProfile* profile =
+ profiles.StopProfiling(CodeEntry::kNoSecurityToken, "", 1);
CHECK_NE(NULL, profile);
// Check call trees.
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index d90be8e4c7..4b4c950478 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -6196,7 +6196,28 @@ TEST(DebugContextIsPreservedBetweenAccesses) {
v8::Local<v8::Context> context1 = v8::Debug::GetDebugContext();
v8::Local<v8::Context> context2 = v8::Debug::GetDebugContext();
CHECK_EQ(*context1, *context2);
- // Make sure debugger is unloaded before running other tests.
- v8::internal::ForceUnloadDebugger();
+}
+
+
+static v8::Handle<v8::Value> expected_callback_data;
+static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
+ CHECK(details.GetEventContext() == expected_context);
+ CHECK_EQ(expected_callback_data, details.GetCallbackData());
+}
+
+// Check that event details contain the context where the debug event occurred.
+TEST(DebugEventContext) {
+ v8::HandleScope scope;
+ expected_callback_data = v8::Int32::New(2010);
+ v8::Debug::SetDebugEventListener2(DebugEventContextChecker,
+ expected_callback_data);
+ expected_context = v8::Context::New();
+ v8::Context::Scope context_scope(expected_context);
+ v8::Script::Compile(v8::String::New("(function(){debugger;})();"))->Run();
+ expected_context.Dispose();
+ expected_context.Clear();
+ v8::Debug::SetDebugEventListener(NULL);
+ expected_context_data = v8::Handle<v8::Value>();
CheckDebuggerUnloaded();
}
+
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 02e64b488e..f94cd457cd 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -237,6 +237,7 @@ TEST(DisasmIa320) {
__ cld();
__ rep_movs();
__ rep_stos();
+ __ stos();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
__ sub(edx, Operand(ebx));
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 4d9d75927d..3fd5c69b1c 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -66,28 +66,6 @@ static void DoTraceHideCEntryFPAddress(Address fp) {
}
-static void CheckRetAddrIsInFunction(const char* func_name,
- Address ret_addr,
- Address func_start_addr,
- unsigned int func_len) {
- printf("CheckRetAddrIsInFunction \"%s\": %p %p %p\n",
- func_name, func_start_addr, ret_addr, func_start_addr + func_len);
- CHECK_GE(ret_addr, func_start_addr);
- CHECK_GE(func_start_addr + func_len, ret_addr);
-}
-
-
-static void CheckRetAddrIsInJSFunction(const char* func_name,
- Address ret_addr,
- Handle<JSFunction> func) {
- v8::internal::Code* func_code = func->code();
- CheckRetAddrIsInFunction(
- func_name, ret_addr,
- func_code->instruction_start(),
- func_code->ExecutableSize());
-}
-
-
// --- T r a c e E x t e n s i o n ---
class TraceExtension : public v8::Extension {
@@ -209,11 +187,16 @@ static Handle<JSFunction> GetGlobalJSFunction(const char* name) {
}
-static void CheckRetAddrIsInJSFunction(const char* func_name,
- Address ret_addr) {
- CheckRetAddrIsInJSFunction(func_name,
- ret_addr,
- GetGlobalJSFunction(func_name));
+static void CheckObjectIsJSFunction(const char* func_name,
+ Address addr) {
+ i::Object* obj = reinterpret_cast<i::Object*>(addr);
+ CHECK(obj->IsJSFunction());
+ CHECK(JSFunction::cast(obj)->shared()->name()->IsString());
+ i::SmartPointer<char> found_name =
+ i::String::cast(
+ JSFunction::cast(
+ obj)->shared()->name())->ToCString();
+ CHECK_EQ(func_name, *found_name);
}
@@ -272,6 +255,7 @@ static void CreateTraceCallerFunction(const char* func_name,
Handle<JSFunction> func = CompileFunction(trace_call_buf.start());
CHECK(!func.is_null());
i::FLAG_allow_natives_syntax = allow_natives_syntax;
+ func->shared()->set_name(*NewString(func_name));
#ifdef DEBUG
v8::internal::Code* func_code = func->code();
@@ -289,6 +273,13 @@ static void CreateTraceCallerFunction(const char* func_name,
// StackTracer uses Top::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
+#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
+ // TODO(711) The hack of replacing the inline runtime function
+ // RandomHeapNumber with GetFrameNumber does not work with the way the full
+ // compiler generates inline runtime calls.
+ i::FLAG_force_full_compiler = false;
+#endif
+
TickSample sample;
InitTraceEnv(&sample);
@@ -313,10 +304,8 @@ TEST(CFromJSStackTrace) {
// StackTracer::Trace
CHECK_GT(sample.frames_count, 1);
// Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
- CheckRetAddrIsInJSFunction("JSFuncDoTrace",
- sample.stack[0]);
- CheckRetAddrIsInJSFunction("JSTrace",
- sample.stack[1]);
+ CheckObjectIsJSFunction("JSFuncDoTrace", sample.stack[0]);
+ CheckObjectIsJSFunction("JSTrace", sample.stack[1]);
}
@@ -326,6 +315,13 @@ TEST(CFromJSStackTrace) {
// Top::c_entry_fp value. In this case, StackTracer uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
+#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
+ // TODO(711) The hack of replacing the inline runtime function
+ // RandomHeapNumber with GetFrameNumber does not work with the way the full
+ // compiler generates inline runtime calls.
+ i::FLAG_force_full_compiler = false;
+#endif
+
TickSample sample;
InitTraceEnv(&sample);
@@ -359,10 +355,8 @@ TEST(PureJSStackTrace) {
sample.function);
CHECK_GT(sample.frames_count, 1);
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
- CheckRetAddrIsInJSFunction("JSTrace",
- sample.stack[0]);
- CheckRetAddrIsInJSFunction("OuterJSTrace",
- sample.stack[1]);
+ CheckObjectIsJSFunction("JSTrace", sample.stack[0]);
+ CheckObjectIsJSFunction("OuterJSTrace", sample.stack[1]);
}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index e5850c9c61..b438d252e0 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -19,22 +19,64 @@ using i::ProfileTree;
using i::ProfileGenerator;
using i::SampleRateCalculator;
using i::TickSample;
+using i::TokenEnumerator;
using i::Vector;
+namespace v8 {
+namespace internal {
+
+class TokenEnumeratorTester {
+ public:
+ static i::List<bool>* token_removed(TokenEnumerator* te) {
+ return &te->token_removed_;
+ }
+};
+
+} } // namespace v8::internal
+
+TEST(TokenEnumerator) {
+ TokenEnumerator te;
+ CHECK_EQ(CodeEntry::kNoSecurityToken, te.GetTokenId(NULL));
+ v8::HandleScope hs;
+ v8::Local<v8::String> token1(v8::String::New("1"));
+ CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+ CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+ v8::Local<v8::String> token2(v8::String::New("2"));
+ CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+ CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+ CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+ {
+ v8::HandleScope hs;
+ v8::Local<v8::String> token3(v8::String::New("3"));
+ CHECK_EQ(2, te.GetTokenId(*v8::Utils::OpenHandle(*token3)));
+ CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+ CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+ }
+ CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
+ i::Heap::CollectAllGarbage(false);
+ CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
+ CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+ CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+}
+
+
TEST(ProfileNodeFindOrAddChild) {
ProfileNode node(NULL, NULL);
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
+ CodeEntry entry1(
+ i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
+ CodeEntry entry2(
+ i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
ProfileNode* childNode2 = node.FindOrAddChild(&entry2);
CHECK_NE(NULL, childNode2);
CHECK_NE(childNode1, childNode2);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+ CodeEntry entry3(
+ i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
ProfileNode* childNode3 = node.FindOrAddChild(&entry3);
CHECK_NE(NULL, childNode3);
CHECK_NE(childNode1, childNode3);
@@ -75,9 +117,12 @@ class ProfileTreeTestHelper {
} // namespace
TEST(ProfileTreeAddPathFromStart) {
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+ CodeEntry entry1(
+ i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry2(
+ i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry3(
+ i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
@@ -142,9 +187,12 @@ TEST(ProfileTreeAddPathFromStart) {
TEST(ProfileTreeAddPathFromEnd) {
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+ CodeEntry entry1(
+ i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry2(
+ i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry3(
+ i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
@@ -222,11 +270,30 @@ TEST(ProfileTreeCalculateTotalTicks) {
CHECK_EQ(1, empty_tree.root()->total_ticks());
CHECK_EQ(1, empty_tree.root()->self_ticks());
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
+ CodeEntry entry1(
+ i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
CodeEntry* e1_path[] = {&entry1};
Vector<CodeEntry*> e1_path_vec(
e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
+
+ ProfileTree single_child_tree;
+ single_child_tree.AddPathFromStart(e1_path_vec);
+ single_child_tree.root()->IncrementSelfTicks();
+ CHECK_EQ(0, single_child_tree.root()->total_ticks());
+ CHECK_EQ(1, single_child_tree.root()->self_ticks());
+ ProfileTreeTestHelper single_child_helper(&single_child_tree);
+ ProfileNode* node1 = single_child_helper.Walk(&entry1);
+ CHECK_NE(NULL, node1);
+ CHECK_EQ(0, node1->total_ticks());
+ CHECK_EQ(1, node1->self_ticks());
+ single_child_tree.CalculateTotalTicks();
+ CHECK_EQ(2, single_child_tree.root()->total_ticks());
+ CHECK_EQ(1, single_child_tree.root()->self_ticks());
+ CHECK_EQ(1, node1->total_ticks());
+ CHECK_EQ(1, node1->self_ticks());
+
+ CodeEntry entry2(
+ i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
CodeEntry* e1_e2_path[] = {&entry1, &entry2};
Vector<CodeEntry*> e1_e2_path_vec(
e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
@@ -241,7 +308,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
// Results in {root,0,0} -> {entry1,0,2} -> {entry2,0,3}
CHECK_EQ(0, flat_tree.root()->total_ticks());
CHECK_EQ(0, flat_tree.root()->self_ticks());
- ProfileNode* node1 = flat_helper.Walk(&entry1);
+ node1 = flat_helper.Walk(&entry1);
CHECK_NE(NULL, node1);
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(2, node1->self_ticks());
@@ -261,7 +328,8 @@ TEST(ProfileTreeCalculateTotalTicks) {
CodeEntry* e2_path[] = {&entry2};
Vector<CodeEntry*> e2_path_vec(
e2_path, sizeof(e2_path) / sizeof(e2_path[0]));
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+ CodeEntry entry3(
+ i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
CodeEntry* e3_path[] = {&entry3};
Vector<CodeEntry*> e3_path_vec(
e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
@@ -316,16 +384,119 @@ TEST(ProfileTreeCalculateTotalTicks) {
}
+TEST(ProfileTreeFilteredClone) {
+ ProfileTree source_tree;
+ const int token0 = 0, token1 = 1, token2 = 2;
+ CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0, token0);
+ CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0, token1);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0, token0);
+ CodeEntry entry4(
+ i::Logger::FUNCTION_TAG, "", "ddd", "", 0,
+ CodeEntry::kInheritsSecurityToken);
+
+ {
+ CodeEntry* e1_e2_path[] = {&entry1, &entry2};
+ Vector<CodeEntry*> e1_e2_path_vec(
+ e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
+ source_tree.AddPathFromStart(e1_e2_path_vec);
+ CodeEntry* e2_e4_path[] = {&entry2, &entry4};
+ Vector<CodeEntry*> e2_e4_path_vec(
+ e2_e4_path, sizeof(e2_e4_path) / sizeof(e2_e4_path[0]));
+ source_tree.AddPathFromStart(e2_e4_path_vec);
+ CodeEntry* e3_e1_path[] = {&entry3, &entry1};
+ Vector<CodeEntry*> e3_e1_path_vec(
+ e3_e1_path, sizeof(e3_e1_path) / sizeof(e3_e1_path[0]));
+ source_tree.AddPathFromStart(e3_e1_path_vec);
+ CodeEntry* e3_e2_path[] = {&entry3, &entry2};
+ Vector<CodeEntry*> e3_e2_path_vec(
+ e3_e2_path, sizeof(e3_e2_path) / sizeof(e3_e2_path[0]));
+ source_tree.AddPathFromStart(e3_e2_path_vec);
+ source_tree.CalculateTotalTicks();
+ // Results in -> {entry1,0,1,0} -> {entry2,1,1,1}
+ // {root,0,4,-1} -> {entry2,0,1,1} -> {entry4,1,1,inherits}
+ // -> {entry3,0,2,0} -> {entry1,1,1,0}
+ // -> {entry2,1,1,1}
+ CHECK_EQ(4, source_tree.root()->total_ticks());
+ CHECK_EQ(0, source_tree.root()->self_ticks());
+ }
+
+ {
+ ProfileTree token0_tree;
+ token0_tree.FilteredClone(&source_tree, token0);
+ // Should be -> {entry1,1,1,0}
+ // {root,1,4,-1} -> {entry3,1,2,0} -> {entry1,1,1,0}
+ // [self ticks from filtered nodes are attributed to their parents]
+ CHECK_EQ(4, token0_tree.root()->total_ticks());
+ CHECK_EQ(1, token0_tree.root()->self_ticks());
+ ProfileTreeTestHelper token0_helper(&token0_tree);
+ ProfileNode* node1 = token0_helper.Walk(&entry1);
+ CHECK_NE(NULL, node1);
+ CHECK_EQ(1, node1->total_ticks());
+ CHECK_EQ(1, node1->self_ticks());
+ CHECK_EQ(NULL, token0_helper.Walk(&entry2));
+ ProfileNode* node3 = token0_helper.Walk(&entry3);
+ CHECK_NE(NULL, node3);
+ CHECK_EQ(2, node3->total_ticks());
+ CHECK_EQ(1, node3->self_ticks());
+ ProfileNode* node3_1 = token0_helper.Walk(&entry3, &entry1);
+ CHECK_NE(NULL, node3_1);
+ CHECK_EQ(1, node3_1->total_ticks());
+ CHECK_EQ(1, node3_1->self_ticks());
+ CHECK_EQ(NULL, token0_helper.Walk(&entry3, &entry2));
+ }
+
+ {
+ ProfileTree token1_tree;
+ token1_tree.FilteredClone(&source_tree, token1);
+ // Should be
+ // {root,1,4,-1} -> {entry2,2,3,1} -> {entry4,1,1,inherits}
+ // [child nodes referring to the same entry get merged and
+ // their self times summed up]
+ CHECK_EQ(4, token1_tree.root()->total_ticks());
+ CHECK_EQ(1, token1_tree.root()->self_ticks());
+ ProfileTreeTestHelper token1_helper(&token1_tree);
+ CHECK_EQ(NULL, token1_helper.Walk(&entry1));
+ CHECK_EQ(NULL, token1_helper.Walk(&entry3));
+ ProfileNode* node2 = token1_helper.Walk(&entry2);
+ CHECK_NE(NULL, node2);
+ CHECK_EQ(3, node2->total_ticks());
+ CHECK_EQ(2, node2->self_ticks());
+ ProfileNode* node2_4 = token1_helper.Walk(&entry2, &entry4);
+ CHECK_NE(NULL, node2_4);
+ CHECK_EQ(1, node2_4->total_ticks());
+ CHECK_EQ(1, node2_4->self_ticks());
+ }
+
+ {
+ ProfileTree token2_tree;
+ token2_tree.FilteredClone(&source_tree, token2);
+ // Should be
+ // {root,4,4,-1}
+ // [no nodes, all ticks get migrated into root node]
+ CHECK_EQ(4, token2_tree.root()->total_ticks());
+ CHECK_EQ(4, token2_tree.root()->self_ticks());
+ ProfileTreeTestHelper token2_helper(&token2_tree);
+ CHECK_EQ(NULL, token2_helper.Walk(&entry1));
+ CHECK_EQ(NULL, token2_helper.Walk(&entry2));
+ CHECK_EQ(NULL, token2_helper.Walk(&entry3));
+ }
+}
+
+
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
TEST(CodeMapAddCode) {
CodeMap code_map;
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
- CodeEntry entry4(i::Logger::FUNCTION_TAG, "", "ddd", "", 0);
+ CodeEntry entry1(
+ i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry2(
+ i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry3(
+ i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry4(
+ i::Logger::FUNCTION_TAG, "", "ddd", "", 0, CodeEntry::kNoSecurityToken);
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
@@ -352,8 +523,10 @@ TEST(CodeMapAddCode) {
TEST(CodeMapMoveAndDeleteCode) {
CodeMap code_map;
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
+ CodeEntry entry1(
+ i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+ CodeEntry entry2(
+ i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
@@ -425,7 +598,8 @@ TEST(RecordTickSample) {
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
- CpuProfile* profile = profiles.StopProfiling("", 1);
+ CpuProfile* profile =
+ profiles.StopProfiling(CodeEntry::kNoSecurityToken, "", 1);
CHECK_NE(NULL, profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
diff --git a/deps/v8/test/mjsunit/arguments-load-across-eval.js b/deps/v8/test/mjsunit/arguments-load-across-eval.js
new file mode 100644
index 0000000000..e97c11329e
--- /dev/null
+++ b/deps/v8/test/mjsunit/arguments-load-across-eval.js
@@ -0,0 +1,86 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests loading of arguments across eval calls.
+
+// Test loading across an eval call that does not shadow variables.
+function testNoShadowing(x, h) {
+ function f() {
+ eval('1');
+ assertEquals(1, x);
+ assertEquals(2, h());
+ function g() {
+ assertEquals(1, x);
+ assertEquals(2, h());
+ }
+ g();
+ }
+ f();
+}
+
+testNoShadowing(1, function() { return 2; });
+
+// Test loading across eval calls that do not shadow variables.
+function testNoShadowing2(x, h) {
+ eval('1');
+ function f() {
+ eval('1');
+ assertEquals(1, x);
+ assertEquals(2, h());
+ function g() {
+ assertEquals(1, x);
+ assertEquals(2, h());
+ }
+ g();
+ }
+ f();
+}
+
+testNoShadowing2(1, function() { return 2; });
+
+// Test loading across an eval call that shadows variables.
+function testShadowing(x, h) {
+ function f() {
+ assertEquals(1, x);
+ assertEquals(2, h());
+ eval('var x = 3; var h = function() { return 4; };');
+ assertEquals(3, x);
+ assertEquals(4, h());
+ function g() {
+ assertEquals(3, x);
+ assertEquals(4, h());
+ }
+ g();
+ }
+ f();
+ assertEquals(1, x);
+ assertEquals(2, h());
+}
+
+testShadowing(1, function() { return 2; });
+
+
diff --git a/deps/v8/test/mjsunit/array-concat.js b/deps/v8/test/mjsunit/array-concat.js
index 2346c8de67..db89f4d0b8 100644
--- a/deps/v8/test/mjsunit/array-concat.js
+++ b/deps/v8/test/mjsunit/array-concat.js
@@ -29,7 +29,82 @@
* @fileoverview Test concat on small and large arrays
*/
-var poses = [140, 4000000000];
+var poses;
+
+poses = [140, 4000000000];
+while (pos = poses.shift()) {
+ var a = new Array(pos);
+ var array_proto = [];
+ a.__proto__ = array_proto;
+ assertEquals(pos, a.length);
+ a.push('foo');
+ assertEquals(pos + 1, a.length);
+ var b = ['bar'];
+ var c = a.concat(b);
+ assertEquals(pos + 2, c.length);
+ assertEquals("undefined", typeof(c[pos - 1]));
+ assertEquals("foo", c[pos]);
+ assertEquals("bar", c[pos + 1]);
+
+ // Can we fool the system by putting a number in a string?
+ var onetwofour = "124";
+ a[onetwofour] = 'doo';
+ assertEquals(a[124], 'doo');
+ c = a.concat(b);
+ assertEquals(c[124], 'doo');
+
+ // If we put a number in the prototype, then the spec says it should be
+ // copied on concat.
+ array_proto["123"] = 'baz';
+ assertEquals(a[123], 'baz');
+
+ c = a.concat(b);
+ assertEquals(pos + 2, c.length);
+ assertEquals("baz", c[123]);
+ assertEquals("undefined", typeof(c[pos - 1]));
+ assertEquals("foo", c[pos]);
+ assertEquals("bar", c[pos + 1]);
+
+ // When we take the number off the prototype it disappears from a, but
+ // the concat put it in c itself.
+ array_proto["123"] = undefined;
+ assertEquals("undefined", typeof(a[123]));
+ assertEquals("baz", c[123]);
+
+ // If the element of prototype is shadowed, the element on the instance
+ // should be copied, but not the one on the prototype.
+ array_proto[123] = 'baz';
+ a[123] = 'xyz';
+ assertEquals('xyz', a[123]);
+ c = a.concat(b);
+ assertEquals('xyz', c[123]);
+
+ // Non-numeric properties on the prototype or the array shouldn't get
+ // copied.
+ array_proto.moe = 'joe';
+ a.ben = 'jerry';
+ assertEquals(a["moe"], 'joe');
+ assertEquals(a["ben"], 'jerry');
+ c = a.concat(b);
+ // ben was not copied
+ assertEquals("undefined", typeof(c.ben));
+
+ // When we take moe off the prototype it disappears from all arrays.
+ array_proto.moe = undefined;
+ assertEquals("undefined", typeof(c.moe));
+
+  // Negative indices don't get concatenated.
+ a[-1] = 'minus1';
+ assertEquals("minus1", a[-1]);
+ assertEquals("undefined", typeof(a[0xffffffff]));
+ c = a.concat(b);
+ assertEquals("undefined", typeof(c[-1]));
+ assertEquals("undefined", typeof(c[0xffffffff]));
+ assertEquals(c.length, a.length + 1);
+
+}
+
+poses = [140, 4000000000];
while (pos = poses.shift()) {
var a = new Array(pos);
assertEquals(pos, a.length);
@@ -91,7 +166,7 @@ while (pos = poses.shift()) {
Array.prototype.moe = undefined;
assertEquals("undefined", typeof(c.moe));
- // Negative indeces don't get concated.
+  // Negative indices don't get concatenated.
a[-1] = 'minus1';
assertEquals("minus1", a[-1]);
assertEquals("undefined", typeof(a[0xffffffff]));
diff --git a/deps/v8/test/mjsunit/array-pop.js b/deps/v8/test/mjsunit/array-pop.js
index a8d131e20f..f193f09c2f 100644
--- a/deps/v8/test/mjsunit/array-pop.js
+++ b/deps/v8/test/mjsunit/array-pop.js
@@ -81,6 +81,34 @@
}
Array.prototype.length = 0; // Clean-up.
}
+
+ // Check that pop works on inherited properties for
+ // arrays with array prototype.
+  for (var i = 0; i < 10; i++) { // Ensure ICs are stabilized.
+ var array_proto = [];
+ array_proto[1] = 1;
+ array_proto[3] = 3;
+ array_proto[5] = 5;
+ array_proto[7] = 7;
+ array_proto[9] = 9;
+ a = [0,1,2,,4,,6,7,8,,];
+ a.__proto__ = array_proto;
+ assertEquals(10, a.length, "array_proto-inherit-initial-length");
+ for (var j = 9; j >= 0; j--) {
+ assertEquals(j + 1, a.length, "array_proto-inherit-pre-length-" + j);
+ assertTrue(j in a, "array_proto-has property " + j);
+ var own = a.hasOwnProperty(j);
+ var inherited = array_proto.hasOwnProperty(j);
+ assertEquals(j, a.pop(), "array_proto-inherit-pop");
+ assertEquals(j, a.length, "array_proto-inherit-post-length");
+ assertFalse(a.hasOwnProperty(j), "array_proto-inherit-deleted-own-" + j);
+ assertEquals(inherited, array_proto.hasOwnProperty(j),
+ "array_proto-inherit-not-deleted-inherited" + j);
+ }
+ }
+
+ // Check that pop works on inherited properties for
+ // arrays with array prototype.
})();
// Test the case of not JSArray receiver.
diff --git a/deps/v8/test/mjsunit/array-shift.js b/deps/v8/test/mjsunit/array-shift.js
index d985b31e06..3601cbbb89 100644
--- a/deps/v8/test/mjsunit/array-shift.js
+++ b/deps/v8/test/mjsunit/array-shift.js
@@ -69,3 +69,40 @@
assertTrue(delete Array.prototype[5]);
assertTrue(delete Array.prototype[7]);
})();
+
+// Now check the case with array of holes and some elements on prototype
+// which is an array itself.
+(function() {
+ var len = 9;
+ var array = new Array(len);
+ var array_proto = new Array();
+ array_proto[3] = "@3";
+ array_proto[7] = "@7";
+ array.__proto__ = array_proto;
+
+ assertEquals(len, array.length);
+ for (var i = 0; i < array.length; i++) {
+ assertEquals(array[i], array_proto[i]);
+ }
+
+ array.shift();
+
+ assertEquals(len - 1, array.length);
+ // Note that shift copies values from prototype into the array.
+ assertEquals(array[2], array_proto[3]);
+ assertTrue(array.hasOwnProperty(2));
+
+ assertEquals(array[6], array_proto[7]);
+ assertTrue(array.hasOwnProperty(6));
+
+ // ... but keeps the rest as holes:
+ array_proto[5] = "@5";
+ assertEquals(array[5], array_proto[5]);
+ assertFalse(array.hasOwnProperty(5));
+
+ assertEquals(array[3], array_proto[3]);
+ assertFalse(array.hasOwnProperty(3));
+
+ assertEquals(array[7], array_proto[7]);
+ assertFalse(array.hasOwnProperty(7));
+})();
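The shift test follows the same pattern: values that are only visible through the prototype are copied into the array at their shifted positions, while indices that remain holes keep reading through to the prototype. A minimal sketch (again assuming the mjsunit helpers):

var proto = [];
proto[3] = '@3';
var a = new Array(5);              // five holes
a.__proto__ = proto;

a.shift();

assertEquals('@3', a[2]);          // inherited value copied to its new slot
assertTrue(a.hasOwnProperty(2));
assertFalse(a.hasOwnProperty(3));  // index 3 itself is still a hole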
diff --git a/deps/v8/test/mjsunit/array-slice.js b/deps/v8/test/mjsunit/array-slice.js
index 30e9f3e9ee..8f9ce53586 100644
--- a/deps/v8/test/mjsunit/array-slice.js
+++ b/deps/v8/test/mjsunit/array-slice.js
@@ -127,6 +127,53 @@
// Now check the case with array of holes and some elements on prototype.
+// Note: it is important that this test runs before the next one,
+// as the next one tampers with Array.prototype.
+(function() {
+ var len = 9;
+ var array = new Array(len);
+
+ var at3 = "@3";
+ var at7 = "@7";
+
+ for (var i = 0; i < 7; i++) {
+ var array_proto = [];
+ array_proto[3] = at3;
+ array_proto[7] = at7;
+ array.__proto__ = array_proto;
+
+ assertEquals(len, array.length);
+ for (var i = 0; i < array.length; i++) {
+ assertEquals(array[i], array_proto[i]);
+ }
+
+ var sliced = array.slice();
+
+ assertEquals(len, sliced.length);
+
+ assertTrue(delete array_proto[3]);
+ assertTrue(delete array_proto[7]);
+
+ // Note that slice copies values from prototype into the array.
+ assertEquals(array[3], undefined);
+ assertFalse(array.hasOwnProperty(3));
+ assertEquals(sliced[3], at3);
+ assertTrue(sliced.hasOwnProperty(3));
+
+ assertEquals(array[7], undefined);
+ assertFalse(array.hasOwnProperty(7));
+ assertEquals(sliced[7], at7);
+ assertTrue(sliced.hasOwnProperty(7));
+
+ // ... but keeps the rest as holes:
+ array_proto[5] = "@5";
+ assertEquals(array[5], array_proto[5]);
+ assertFalse(array.hasOwnProperty(5));
+ }
+})();
+
+
+// Now check the case with array of holes and some elements on prototype.
(function() {
var len = 9;
var array = new Array(len);
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index 887097db61..88c4876496 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -255,6 +255,56 @@
for (var i = 0; i < 7; i++) {
var array = new Array(len);
+ var array_proto = [];
+ array_proto[3] = at3;
+ array_proto[7] = at7;
+ array.__proto__ = array_proto;
+
+ var spliced = array.splice(2, 2, 'one', undefined, 'two');
+
+ // Second hole (at index 3) of array turns into
+ // value of Array.prototype[3] while copying.
+ assertEquals([, at3], spliced);
+ assertEquals([, , 'one', undefined, 'two', , , at7, at7, ,], array);
+
+ // ... but array[3] and array[7] is actually a hole:
+ assertTrue(delete array_proto[3]);
+ assertEquals(undefined, array[3]);
+ assertTrue(delete array_proto[7]);
+ assertEquals(undefined, array[7]);
+
+ // and now check hasOwnProperty
+ assertFalse(array.hasOwnProperty(0), "array.hasOwnProperty(0)");
+ assertFalse(array.hasOwnProperty(1), "array.hasOwnProperty(1)");
+ assertTrue(array.hasOwnProperty(2));
+ assertTrue(array.hasOwnProperty(3));
+ assertTrue(array.hasOwnProperty(4));
+ assertFalse(array.hasOwnProperty(5), "array.hasOwnProperty(5)");
+ assertFalse(array.hasOwnProperty(6), "array.hasOwnProperty(6)");
+ assertFalse(array.hasOwnProperty(7), "array.hasOwnProperty(7)");
+ assertTrue(array.hasOwnProperty(8));
+ assertFalse(array.hasOwnProperty(9), "array.hasOwnProperty(9)");
+
+ // and now check couple of indices above length.
+ assertFalse(array.hasOwnProperty(10), "array.hasOwnProperty(10)");
+ assertFalse(array.hasOwnProperty(15), "array.hasOwnProperty(15)");
+ assertFalse(array.hasOwnProperty(31), "array.hasOwnProperty(31)");
+ assertFalse(array.hasOwnProperty(63), "array.hasOwnProperty(63)");
+ assertFalse(array.hasOwnProperty(2 << 32 - 1),
+ "array.hasOwnProperty(2 << 31 - 1)");
+ }
+})();
+
+
+// Now check the case with array of holes and some elements on prototype.
+(function() {
+ var len = 9;
+
+ var at3 = "@3";
+ var at7 = "@7";
+
+ for (var i = 0; i < 7; i++) {
+ var array = new Array(len);
Array.prototype[3] = at3;
Array.prototype[7] = at7;
@@ -265,7 +315,9 @@
assertEquals([, at3], spliced);
assertEquals([, , 'one', undefined, 'two', , , at7, at7, ,], array);
- // ... but array[7] is actually a hole:
+ // ... but array[3] and array[7] is actually a hole:
+ assertTrue(delete Array.prototype[3]);
+ assertEquals(undefined, array[3]);
assertTrue(delete Array.prototype[7]);
assertEquals(undefined, array[7]);
@@ -286,7 +338,8 @@
assertFalse(array.hasOwnProperty(15), "array.hasOwnProperty(15)");
assertFalse(array.hasOwnProperty(31), "array.hasOwnProperty(31)");
assertFalse(array.hasOwnProperty(63), "array.hasOwnProperty(63)");
- assertFalse(array.hasOwnProperty(2 << 32 - 1), "array.hasOwnProperty(2 << 31 - 1)");
+ assertFalse(array.hasOwnProperty(2 << 32 - 1),
+ "array.hasOwnProperty(2 << 31 - 1)");
}
})();
diff --git a/deps/v8/test/mjsunit/array-unshift.js b/deps/v8/test/mjsunit/array-unshift.js
index dbe245b8b4..c4cc95cbb4 100644
--- a/deps/v8/test/mjsunit/array-unshift.js
+++ b/deps/v8/test/mjsunit/array-unshift.js
@@ -37,8 +37,8 @@
})();
-// Check that unshif with no args has a side-effect of
-// feeling the holes with elements from the prototype
+// Check that unshift with no args has a side-effect of
+// filling the holes with elements from the prototype
// (if present, of course)
(function() {
var len = 3;
@@ -115,6 +115,81 @@
assertTrue(delete Array.prototype[7]);
})();
+// Check that unshift with no args has a side-effect of
+// filling the holes with elements from the prototype
+// (if present, of course)
+(function() {
+ var len = 3;
+ var array = new Array(len);
+
+ var at0 = '@0';
+ var at2 = '@2';
+
+ var array_proto = [];
+ array_proto[0] = at0;
+ array_proto[2] = at2;
+ array.__proto__ = array_proto;
+
+ // array owns nothing...
+ assertFalse(array.hasOwnProperty(0));
+ assertFalse(array.hasOwnProperty(1));
+ assertFalse(array.hasOwnProperty(2));
+
+ // ... but sees values from array_proto.
+ assertEquals(array[0], at0);
+ assertEquals(array[1], undefined);
+ assertEquals(array[2], at2);
+
+ assertEquals(len, array.unshift());
+
+ // unshift makes array own 0 and 2...
+ assertTrue(array.hasOwnProperty(0));
+ assertFalse(array.hasOwnProperty(1));
+ assertTrue(array.hasOwnProperty(2));
+
+  // ... so they are not affected by delete.
+ assertEquals(array[0], at0);
+ assertEquals(array[1], undefined);
+ assertEquals(array[2], at2);
+})();
+
+
+// Now check the case with array of holes and some elements on prototype.
+(function() {
+ var len = 9;
+ var array = new Array(len);
+  var array_proto = [];
+ array_proto[3] = "@3";
+ array_proto[7] = "@7";
+ array.__proto__ = array_proto;
+
+ assertEquals(len, array.length);
+ for (var i = 0; i < array.length; i++) {
+ assertEquals(array[i], array_proto[i]);
+ }
+
+ assertEquals(len + 1, array.unshift('head'));
+
+ assertEquals(len + 1, array.length);
+ // Note that unshift copies values from prototype into the array.
+ assertEquals(array[4], array_proto[3]);
+ assertTrue(array.hasOwnProperty(4));
+
+ assertEquals(array[8], array_proto[7]);
+ assertTrue(array.hasOwnProperty(8));
+
+ // ... but keeps the rest as holes:
+ array_proto[5] = "@5";
+ assertEquals(array[5], array_proto[5]);
+ assertFalse(array.hasOwnProperty(5));
+
+ assertEquals(array[3], array_proto[3]);
+ assertFalse(array.hasOwnProperty(3));
+
+ assertEquals(array[7], array_proto[7]);
+ assertFalse(array.hasOwnProperty(7));
+})();
+
// Check the behaviour when approaching maximal values for length.
(function() {
for (var i = 0; i < 7; i++) {
diff --git a/deps/v8/test/mjsunit/compiler/assignment.js b/deps/v8/test/mjsunit/compiler/assignment.js
index ee2d323781..6aded4e9c9 100644
--- a/deps/v8/test/mjsunit/compiler/assignment.js
+++ b/deps/v8/test/mjsunit/compiler/assignment.js
@@ -262,3 +262,15 @@ function bar_loop() {
}
bar_loop();
+
+
+// Test for assignment using a keyed store ic:
+function store_i_in_element_i_of_object_i() {
+ var i = new Object();
+ i[i] = i;
+}
+
+// Run three times to exercise caches.
+store_i_in_element_i_of_object_i();
+store_i_in_element_i_of_object_i();
+store_i_in_element_i_of_object_i();
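The regression test above exercises a keyed store in which receiver, key, and value are all the same object, the degenerate case the shared store-IC call path must handle without clobbering any of its operands. For illustration (hypothetical snippet, assuming the mjsunit helpers), the observable behaviour is simply that the key gets coerced to a string:

var o = new Object();
o[o] = o;                            // key becomes String(o), i.e. "[object Object]"
assertTrue(o === o['[object Object]']);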
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 07c4e7eff3..558282f52b 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -27,6 +27,8 @@
function MjsUnitAssertionError(message) {
this.message = message;
+ // This allows fetching the stack trace using TryCatch::StackTrace.
+ this.stack = new Error("").stack;
}
MjsUnitAssertionError.prototype.toString = function () {
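The mjsunit change above snapshots a stack trace when an assertion error is constructed, so an embedder catching the exception can read it back through TryCatch::StackTrace. An illustrative sketch of the same pattern (not part of the patch):

function MyError(message) {
  this.message = message;
  this.stack = new Error('').stack;  // capture the current JS stack, if the engine exposes it
}

MyError.prototype.toString = function () {
  return 'MyError: ' + this.message;
};

try {
  throw new MyError('boom');
} catch (e) {
  // e.stack now holds the captured trace on engines providing Error#stack.
}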
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 47963fe641..514d345a46 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -48,10 +48,6 @@ unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
# Skip long running test in debug and allow it to timeout in release mode.
regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
-# Skip experimental liveedit drop frame on non-ia32 architectures.
-# debug-liveedit-check-stack: SKIP if $arch != ia32
-debug-liveedit-check-stack: SKIP
-
[ $arch == arm ]
# Slow tests which times out in debug mode.
@@ -68,7 +64,19 @@ array-splice: PASS || TIMEOUT
# Skip long running test in debug mode on ARM.
string-indexof-2: PASS, SKIP if $mode == debug
+# Stack manipulations in LiveEdit are implemented for ia32 only.
+debug-liveedit-check-stack: SKIP
+
[ $arch == mips ]
+# Stack manipulations in LiveEdit are implemented for ia32 only.
+debug-liveedit-check-stack: SKIP
+
# Skip all tests on MIPS.
*: SKIP
+
+[ $arch == x64 ]
+# Stack manipulations in LiveEdit are implemented for ia32 only.
+debug-liveedit-check-stack: SKIP
+
+
diff --git a/deps/v8/test/mjsunit/property-load-across-eval.js b/deps/v8/test/mjsunit/property-load-across-eval.js
index e174b858ca..5419cc7f3b 100644
--- a/deps/v8/test/mjsunit/property-load-across-eval.js
+++ b/deps/v8/test/mjsunit/property-load-across-eval.js
@@ -41,6 +41,7 @@ function testNoShadowing() {
function f() {
eval('1');
assertEquals(1, x);
+ try { typeof(asdf); } catch(e) { assertUnreachable(); }
assertEquals(2, y);
assertEquals('global', global_function());
assertEquals('local', local_function());
@@ -60,6 +61,7 @@ function testNoShadowing() {
assertEquals('const_local', local_const_initialized());
function g() {
assertEquals(1, x);
+ try { typeof(asdf); } catch(e) { assertUnreachable(); }
assertEquals(2, y);
assertEquals('global', global_function());
assertEquals('local', local_function());
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index c92bfa6bd9..b4ec444784 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -161,6 +161,7 @@ js1_5/Regress/regress-416628: PASS || FAIL || TIMEOUT if $mode == debug
# In Denmark the adjustment starts one week earlier!.
# Tests based on shell that use dates in this gap are flaky.
ecma/Date/15.9.5.10-1: PASS || FAIL
+ecma/Date/15.9.5.10-2: PASS || TIMEOUT if ($arch == arm && $mode == debug)
ecma/Date/15.9.5.12-1: PASS || FAIL
ecma/Date/15.9.5.14: PASS || FAIL
ecma/Date/15.9.5.34-1: PASS || FAIL
diff --git a/deps/v8/tools/gc-nvp-trace-processor.py b/deps/v8/tools/gc-nvp-trace-processor.py
new file mode 100644
index 0000000000..3721b01823
--- /dev/null
+++ b/deps/v8/tools/gc-nvp-trace-processor.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# This is a utility for plotting charts based on GC traces produced by V8 when
+# run with flags --trace-gc --trace-gc-nvp. Relies on gnuplot for actual
+# plotting.
+#
+# Usage: gc-nvp-trace-processor.py <GC-trace-filename>
+#
+
+
+from __future__ import with_statement
+import sys, types, re, subprocess
+
+def flatten(l):
+ flat = []
+ for i in l: flat.extend(i)
+ return flat
+
+def split_nvp(s):
+ t = {}
+ for m in re.finditer(r"(\w+)=(-?\d+)", s):
+ t[m.group(1)] = int(m.group(2))
+ return t
+
+def parse_gc_trace(input):
+ trace = []
+ with open(input) as f:
+ for line in f:
+ info = split_nvp(line)
+ if info and 'pause' in info and info['pause'] > 0:
+ info['i'] = len(trace)
+ trace.append(info)
+ return trace
+
+def extract_field_names(script):
+  fields = { 'data': True, 'in': True }
+  # Start numbering after the seeded entries.
+  field_count = len(fields)
+
+  for m in re.finditer(r"\$(\w+)", script):
+    field_name = m.group(1)
+    if field_name not in fields:
+      fields[field_name] = field_count
+      field_count = field_count + 1
+
+  return fields
+
+def gnuplot(script):
+ gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
+ gnuplot.stdin.write(script)
+ gnuplot.stdin.close()
+ gnuplot.wait()
+
+x1y1 = 'x1y1'
+x1y2 = 'x1y2'
+x2y1 = 'x2y1'
+x2y2 = 'x2y2'
+
+class Item(object):
+ def __init__(self, title, field, axis = x1y1, **keywords):
+ self.title = title
+ self.axis = axis
+ self.props = keywords
+ if type(field) is types.ListType:
+ self.field = field
+ else:
+ self.field = [field]
+
+ def fieldrefs(self):
+ return self.field
+
+ def to_gnuplot(self, context):
+ args = ['"%s"' % context.datafile,
+ 'using %s' % context.format_fieldref(self.field),
+ 'title "%s"' % self.title,
+ 'axis %s' % self.axis]
+ if 'style' in self.props:
+ args.append('with %s' % self.props['style'])
+ if 'lc' in self.props:
+ args.append('lc rgb "%s"' % self.props['lc'])
+ if 'fs' in self.props:
+ args.append('fs %s' % self.props['fs'])
+ return ' '.join(args)
+
+class Plot(object):
+ def __init__(self, *items):
+ self.items = items
+
+ def fieldrefs(self):
+ return flatten([item.fieldrefs() for item in self.items])
+
+ def to_gnuplot(self, ctx):
+ return 'plot ' + ', '.join([item.to_gnuplot(ctx) for item in self.items])
+
+class Set(object):
+ def __init__(self, value):
+ self.value = value
+
+ def to_gnuplot(self, ctx):
+ return 'set ' + self.value
+
+ def fieldrefs(self):
+ return []
+
+class Context(object):
+ def __init__(self, datafile, field_to_index):
+ self.datafile = datafile
+ self.field_to_index = field_to_index
+
+ def format_fieldref(self, fieldref):
+ return ':'.join([str(self.field_to_index[field]) for field in fieldref])
+
+def collect_fields(plot):
+ field_to_index = {}
+ fields = []
+
+ def add_field(field):
+ if field not in field_to_index:
+ fields.append(field)
+ field_to_index[field] = len(fields)
+
+ for field in flatten([item.fieldrefs() for item in plot]):
+ add_field(field)
+
+ return (fields, field_to_index)
+
+def is_y2_used(plot):
+ for subplot in plot:
+ if isinstance(subplot, Plot):
+ for item in subplot.items:
+ if item.axis == x1y2 or item.axis == x2y2:
+ return True
+ return False
+
+def get_field(trace_line, field):
+ t = type(field)
+ if t is types.StringType:
+ return trace_line[field]
+ elif t is types.FunctionType:
+ return field(trace_line)
+
+def generate_datafile(datafile_name, trace, fields):
+ with open(datafile_name, 'w') as datafile:
+ for line in trace:
+ data_line = [str(get_field(line, field)) for field in fields]
+ datafile.write('\t'.join(data_line))
+ datafile.write('\n')
+
+def generate_script_and_datafile(plot, trace, datafile, output):
+ (fields, field_to_index) = collect_fields(plot)
+ generate_datafile(datafile, trace, fields)
+ script = [
+ 'set terminal png',
+ 'set output "%s"' % output,
+ 'set autoscale',
+ 'set ytics nomirror',
+ 'set xtics nomirror',
+ 'set key below'
+ ]
+
+ if is_y2_used(plot):
+ script.append('set autoscale y2')
+ script.append('set y2tics')
+
+ context = Context(datafile, field_to_index)
+
+ for item in plot:
+ script.append(item.to_gnuplot(context))
+
+ return '\n'.join(script)
+
+def plot_all(plots, trace, prefix):
+ charts = []
+
+ for plot in plots:
+ outfilename = "%s_%d.png" % (prefix, len(charts))
+ charts.append(outfilename)
+ script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
+ print 'Plotting %s...' % outfilename
+ gnuplot(script)
+
+ return charts
+
+def reclaimed_bytes(row):
+ return row['total_size_before'] - row['total_size_after']
+
+plots = [
+ [
+ Set('style fill solid 0.5 noborder'),
+ Set('style histogram rowstacked'),
+ Set('style data histograms'),
+ Plot(Item('Marking', 'mark', lc = 'purple'),
+ Item('Sweep', 'sweep', lc = 'blue'),
+ Item('Compaction', 'compact', lc = 'red'),
+ Item('Other',
+ lambda r: r['pause'] - r['mark'] - r['sweep'] - r['compact'],
+ lc = 'grey'))
+ ],
+ [
+ Set('style histogram rowstacked'),
+ Set('style data histograms'),
+ Plot(Item('Heap Size (before GC)', 'total_size_before', x1y2,
+ fs = 'solid 0.4 noborder',
+ lc = 'green'),
+ Item('Total holes (after GC)', 'holes_size_before', x1y2,
+ fs = 'solid 0.4 noborder',
+ lc = 'red'),
+ Item('GC Time', ['i', 'pause'], style = 'lines', lc = 'red'))
+ ],
+ [
+ Set('style histogram rowstacked'),
+ Set('style data histograms'),
+ Plot(Item('Heap Size (after GC)', 'total_size_after', x1y2,
+ fs = 'solid 0.4 noborder',
+ lc = 'green'),
+ Item('Total holes (after GC)', 'holes_size_after', x1y2,
+ fs = 'solid 0.4 noborder',
+ lc = 'red'),
+ Item('GC Time', ['i', 'pause'],
+ style = 'lines',
+ lc = 'red'))
+ ],
+ [
+ Set('style fill solid 0.5 noborder'),
+ Set('style data histograms'),
+ Plot(Item('Allocated', 'allocated'),
+ Item('Reclaimed', reclaimed_bytes),
+ Item('Promoted', 'promoted', style = 'lines', lc = 'black'))
+ ],
+]
+
+def process_trace(filename):
+ trace = parse_gc_trace(filename)
+ total_gc = reduce(lambda t,r: t + r['pause'], trace, 0)
+ max_gc = reduce(lambda t,r: max(t, r['pause']), trace, 0)
+ avg_gc = total_gc / len(trace)
+
+ charts = plot_all(plots, trace, filename)
+
+ with open(filename + '.html', 'w') as out:
+ out.write('<html><body>')
+ out.write('Total in GC: <b>%d</b><br/>' % total_gc)
+ out.write('Max in GC: <b>%d</b><br/>' % max_gc)
+ out.write('Avg in GC: <b>%d</b><br/>' % avg_gc)
+ for chart in charts:
+ out.write('<img src="%s">' % chart)
+ out.write('</body></html>')
+
+ print "%s generated." % (filename + '.html')
+
+if len(sys.argv) != 2:
+ print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+ sys.exit(1)
+
+process_trace(sys.argv[1])
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 25ba5ebf8e..5985b9fd14 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -39,19 +39,28 @@
'ENABLE_VMSTATE_TRACKING',
],
'conditions': [
- ['v8_target_arch=="arm"', {
- 'defines': [
- 'V8_TARGET_ARCH_ARM',
- ],
- }],
- ['v8_target_arch=="ia32"', {
- 'defines': [
- 'V8_TARGET_ARCH_IA32',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'defines': [
- 'V8_TARGET_ARCH_X64',
+ ['OS!="mac"', {
+ # TODO(mark): The OS!="mac" conditional is temporary. It can be
+ # removed once the Mac Chromium build stops setting target_arch to
+ # ia32 and instead sets it to mac. Other checks in this file for
+ # OS=="mac" can be removed at that time as well. This can be cleaned
+ # up once http://crbug.com/44205 is fixed.
+ 'conditions': [
+ ['v8_target_arch=="arm"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_ARM',
+ ],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_IA32',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_X64',
+ ],
+ }],
],
}],
],
@@ -229,6 +238,7 @@
'../../src/assembler.cc',
'../../src/assembler.h',
'../../src/ast.cc',
+ '../../src/ast-inl.h',
'../../src/ast.h',
'../../src/bootstrapper.cc',
'../../src/bootstrapper.h',
@@ -434,6 +444,7 @@
],
'sources': [
'../../src/fast-codegen.cc',
+ '../../src/jump-target-light.h',
'../../src/jump-target-light-inl.h',
'../../src/jump-target-light.cc',
'../../src/virtual-frame-light-inl.h',
@@ -462,6 +473,7 @@
'../../src/arm/register-allocator-arm.cc',
'../../src/arm/simulator-arm.cc',
'../../src/arm/stub-cache-arm.cc',
+ '../../src/arm/virtual-frame-arm-inl.h',
'../../src/arm/virtual-frame-arm.cc',
'../../src/arm/virtual-frame-arm.h',
],
@@ -474,11 +486,12 @@
}]
]
}],
- ['v8_target_arch=="ia32"', {
+ ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
'include_dirs+': [
'../../src/ia32',
],
'sources': [
+ '../../src/jump-target-heavy.h',
'../../src/jump-target-heavy-inl.h',
'../../src/jump-target-heavy.cc',
'../../src/virtual-frame-heavy-inl.h',
@@ -509,12 +522,13 @@
'../../src/ia32/virtual-frame-ia32.h',
],
}],
- ['v8_target_arch=="x64"', {
+ ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
'include_dirs+': [
'../../src/x64',
],
'sources': [
'../../src/fast-codegen.cc',
+ '../../src/jump-target-heavy.h',
'../../src/jump-target-heavy-inl.h',
'../../src/jump-target-heavy.cc',
'../../src/virtual-frame-heavy-inl.h',
diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj
index 1e9d1e74cd..46aba8d478 100644
--- a/deps/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj
@@ -237,7 +237,6 @@
9FA38BC51175B2E500C4CD55 /* full-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */; };
9FA38BC61175B2E500C4CD55 /* jump-target-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC31175B2E500C4CD55 /* jump-target-ia32.cc */; };
9FA38BC71175B2E500C4CD55 /* virtual-frame-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC41175B2E500C4CD55 /* virtual-frame-ia32.cc */; };
- 9FA38BCE1175B30400C4CD55 /* assembler-thumb2.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC91175B30400C4CD55 /* assembler-thumb2.cc */; };
9FA38BCF1175B30400C4CD55 /* full-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */; };
9FA38BD01175B30400C4CD55 /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */; };
9FA38BD11175B30400C4CD55 /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */; };
@@ -619,9 +618,6 @@
9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-ia32.cc"; path = "ia32/full-codegen-ia32.cc"; sourceTree = "<group>"; };
9FA38BC31175B2E500C4CD55 /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-ia32.cc"; path = "ia32/jump-target-ia32.cc"; sourceTree = "<group>"; };
9FA38BC41175B2E500C4CD55 /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-ia32.cc"; path = "ia32/virtual-frame-ia32.cc"; sourceTree = "<group>"; };
- 9FA38BC81175B30400C4CD55 /* assembler-thumb2-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-thumb2-inl.h"; path = "arm/assembler-thumb2-inl.h"; sourceTree = "<group>"; };
- 9FA38BC91175B30400C4CD55 /* assembler-thumb2.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "assembler-thumb2.cc"; path = "arm/assembler-thumb2.cc"; sourceTree = "<group>"; };
- 9FA38BCA1175B30400C4CD55 /* assembler-thumb2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-thumb2.h"; path = "arm/assembler-thumb2.h"; sourceTree = "<group>"; };
9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-arm.cc"; path = "arm/full-codegen-arm.cc"; sourceTree = "<group>"; };
9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; };
9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-arm.cc"; path = "arm/virtual-frame-arm.cc"; sourceTree = "<group>"; };
@@ -734,9 +730,6 @@
897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */,
897FF1010E719B8F00D62E90 /* assembler-ia32.cc */,
897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
- 9FA38BC81175B30400C4CD55 /* assembler-thumb2-inl.h */,
- 9FA38BC91175B30400C4CD55 /* assembler-thumb2.cc */,
- 9FA38BCA1175B30400C4CD55 /* assembler-thumb2.h */,
897FF1030E719B8F00D62E90 /* assembler.cc */,
897FF1040E719B8F00D62E90 /* assembler.h */,
897FF1050E719B8F00D62E90 /* ast.cc */,
@@ -1394,7 +1387,6 @@
89F23C400E78D5B2006B2466 /* allocation.cc in Sources */,
89F23C410E78D5B2006B2466 /* api.cc in Sources */,
89F23C970E78D5E3006B2466 /* assembler-arm.cc in Sources */,
- 9FA38BCE1175B30400C4CD55 /* assembler-thumb2.cc in Sources */,
89F23C430E78D5B2006B2466 /* assembler.cc in Sources */,
89F23C440E78D5B2006B2466 /* ast.cc in Sources */,
89F23C450E78D5B2006B2466 /* bootstrapper.cc in Sources */,
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index e0845276c8..004e16ee0e 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -205,6 +205,10 @@
>
</File>
<File
+ RelativePath="..\..\src\ast-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ast.cc"
>
</File>
@@ -613,6 +617,10 @@
>
</File>
<File
+ RelativePath="..\..\src\jump-target-heavy.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\jump-target.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index e035392cf2..39cd42afe1 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -205,6 +205,10 @@
>
</File>
<File
+ RelativePath="..\..\src\ast-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ast.cc"
>
</File>
@@ -597,6 +601,10 @@
>
</File>
<File
+ RelativePath="..\..\src\jump-target-light.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\jump-target.cc"
>
</File>
@@ -1013,6 +1021,10 @@
>
</File>
<File
+ RelativePath="..\..\src\arm\virtual-frame-arm-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\arm\virtual-frame-arm.h"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index 25cac8ed9e..46078179e1 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -205,6 +205,10 @@
>
</File>
<File
+ RelativePath="..\..\src\ast-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ast.cc"
>
</File>
@@ -590,6 +594,10 @@
>
</File>
<File
+ RelativePath="..\..\src\jump-target-heavy.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\jump-target.cc"
>
</File>