author    Ryan Dahl <ry@tinyclouds.org>  2009-12-19 01:04:19 +0100
committer Ryan Dahl <ry@tinyclouds.org>  2009-12-19 01:04:19 +0100
commit    a98afdfb2f05d163b2d4145f2d98c4a7ffd13bfd (patch)
tree      7efd649b7aceaab2d4d847ce79c4517fb616afe3
parent    0981e7f663c9f6cfbccb49aa2956df499a63e60d (diff)
Revert "Upgrade V8 to 2.0.5"
This reverts commit 20b945df706b2b9fcbc1a84230372d288d497544. Broken on Hagen's Macintosh. Don't have time to investigate.
-rw-r--r--  deps/v8/.gitignore | 1
-rw-r--r--  deps/v8/AUTHORS | 3
-rw-r--r--  deps/v8/ChangeLog | 50
-rwxr-xr-x [-rw-r--r--]  deps/v8/SConstruct | 19
-rw-r--r--  deps/v8/include/v8.h | 26
-rwxr-xr-x  deps/v8/src/SConscript | 7
-rw-r--r--  deps/v8/src/api.cc | 94
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 186
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 221
-rw-r--r--  deps/v8/src/arm/assembler-thumb2-inl.h | 267
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.cc | 1821
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.h | 1027
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 92
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 21
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 21
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc | 350
-rw-r--r--  deps/v8/src/arm/frames-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 10
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 30
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 6
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 20
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 47
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 1
-rw-r--r--  deps/v8/src/assembler.cc | 10
-rw-r--r--  deps/v8/src/assembler.h | 4
-rw-r--r--  deps/v8/src/ast.h | 17
-rw-r--r--  deps/v8/src/bootstrapper.cc | 26
-rw-r--r--  deps/v8/src/bootstrapper.h | 4
-rw-r--r--  deps/v8/src/code-stubs.cc | 143
-rw-r--r--  deps/v8/src/code-stubs.h | 19
-rw-r--r--  deps/v8/src/codegen.h | 49
-rw-r--r--  deps/v8/src/compiler.cc | 78
-rw-r--r--  deps/v8/src/execution.cc | 6
-rw-r--r--  deps/v8/src/factory.cc | 15
-rw-r--r--  deps/v8/src/factory.h | 6
-rw-r--r--  deps/v8/src/fast-codegen.cc | 308
-rw-r--r--  deps/v8/src/fast-codegen.h | 195
-rw-r--r--  deps/v8/src/global-handles.cc | 17
-rw-r--r--  deps/v8/src/globals.h | 19
-rw-r--r--  deps/v8/src/heap-inl.h | 69
-rw-r--r--  deps/v8/src/heap-profiler.cc | 5
-rw-r--r--  deps/v8/src/heap-profiler.h | 6
-rw-r--r--  deps/v8/src/heap.cc | 159
-rw-r--r--  deps/v8/src/heap.h | 69
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 11
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 1
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 47
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 458
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 25
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 8
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc | 381
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 165
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 48
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 22
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 143
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 11
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 6
-rw-r--r--  deps/v8/src/ic.cc | 22
-rw-r--r--  deps/v8/src/ic.h | 24
-rw-r--r--  deps/v8/src/macro-assembler.h | 5
-rw-r--r--  deps/v8/src/mark-compact.cc | 74
-rw-r--r--  deps/v8/src/math.js | 35
-rw-r--r--  deps/v8/src/messages.js | 5
-rw-r--r--  deps/v8/src/objects-inl.h | 12
-rw-r--r--  deps/v8/src/objects.cc | 14
-rw-r--r--  deps/v8/src/objects.h | 47
-rw-r--r--  deps/v8/src/parser.cc | 3
-rw-r--r--  deps/v8/src/prettyprinter.cc | 10
-rw-r--r--  deps/v8/src/prettyprinter.h | 2
-rw-r--r--  deps/v8/src/rewriter.cc | 2
-rw-r--r--  deps/v8/src/runtime.cc | 105
-rw-r--r--  deps/v8/src/runtime.h | 4
-rw-r--r--  deps/v8/src/runtime.js | 6
-rw-r--r--  deps/v8/src/scopes.cc | 3
-rw-r--r--  deps/v8/src/scopes.h | 11
-rw-r--r--  deps/v8/src/serialize.cc | 22
-rw-r--r--  deps/v8/src/spaces.cc | 4
-rw-r--r--  deps/v8/src/spaces.h | 27
-rw-r--r--  deps/v8/src/stub-cache.cc | 4
-rw-r--r--  deps/v8/src/stub-cache.h | 11
-rw-r--r--  deps/v8/src/token.cc | 2
-rw-r--r--  deps/v8/src/token.h | 9
-rw-r--r--  deps/v8/src/v8-counters.h | 2
-rw-r--r--  deps/v8/src/v8natives.js | 207
-rw-r--r--  deps/v8/src/variables.cc | 4
-rw-r--r--  deps/v8/src/variables.h | 14
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 276
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 38
-rw-r--r--  deps/v8/src/x64/fast-codegen-x64.cc | 354
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 103
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 125
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 35
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 47
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 34
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 33
-rwxr-xr-x  deps/v8/test/cctest/test-macro-assembler-x64.cc | 12
-rw-r--r--  deps/v8/test/mjsunit/compiler/thisfunction.js | 35
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives.js | 1
-rw-r--r--  deps/v8/test/mjsunit/math-min-max.js | 51
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 2
-rw-r--r--  deps/v8/test/mjsunit/object-create.js | 250
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-524.js | 32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-540.js | 47
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-545.js | 47
-rw-r--r--  deps/v8/test/mjsunit/try.js | 45
-rwxr-xr-x  deps/v8/tools/stats-viewer.py | 100
108 files changed, 1615 insertions(+), 7621 deletions(-)
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 974628d884..e5687e730e 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -14,7 +14,6 @@
*.pdb
#*#
*~
-.cpplint-cache
d8
d8_g
shell
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index af0ecded71..4fd7aa5b76 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -13,11 +13,10 @@ Daniel James <dnljms@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
-John Jozwiak <jjozwiak@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
-Subrato K De <subratokde@codeaurora.org>
+John Jozwiak <jjozwiak@codeaurora.org>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index f1b5453ef7..825431cdec 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,35 +1,3 @@
-2009-12-18: Version 2.0.5
-
- Extended the upper limit of map space to allow for 7 times as many
- maps to be allocated (issue 524).
-
- Improved performance of code using closures.
-
- Improved performance of some binary operations involving doubles.
-
-
-2009-12-16: Version 2.0.4
-
- Added ECMAScript 5 Object.create.
-
- Improved performance of Math.max and Math.min.
-
- Optimized adding of strings on 64-bit platforms.
-
- Improved handling of external strings by using a separate table
- instead of weak handles. This improves garbage collection
- performance and uses less memory.
-
- Changed code generation for object and array literals in toplevel
- code to be more compact by doing more work in the runtime.
-
- Fixed a crash bug triggered when garbage collection happened during
- generation of a callback load inline cache stub.
-
- Fixed crash bug sometimes triggered when local variables shadowed
- parameters in functions that used the arguments object.
-
-
2009-12-03: Version 2.0.3
Optimized handling and adding of strings, for-in and Array.join.
@@ -67,7 +35,7 @@
Reverted a change which caused Chromium interactive ui test
failures.
-
+
2009-11-18: Version 2.0.0
Added support for VFP on ARM.
@@ -112,7 +80,7 @@
2009-10-16: Version 1.3.16
-
+
X64: Convert smis to holding 32 bits of payload.
Introduce v8::Integer::NewFromUnsigned method.
@@ -257,7 +225,7 @@
notifications when V8 has not yet been initialized.
Fixed ARM simulator compilation problem on Windows.
-
+
2009-08-25: Version 1.3.7
@@ -372,9 +340,9 @@
function is a built-in.
Initial implementation of constructor heap profile for JS objects.
-
+
More fine grained control of profiling aspects through the API.
-
+
Optimized the called as constructor check for API calls.
@@ -399,8 +367,8 @@
Added an external allocation limit to avoid issues where small V8
objects would hold on to large amounts of external memory without
causing garbage collections.
-
- Finished more of the inline caching stubs for x64 targets.
+
+ Finished more of the inline caching stubs for x64 targets.
2009-07-13: Version 1.2.14
@@ -480,9 +448,9 @@
Fixed a bug in the string type inference.
Fixed a bug in the handling of 'constant function' properties.
-
+
Improved overall performance.
-
+
2009-06-16: Version 1.2.8
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index ef0917fa78..edaa66b75b 100644..100755
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -143,9 +143,6 @@ LIBRARY_FLAGS = {
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
- 'library:shared': {
- 'CPPDEFINES': ['V8_SHARED']
- }
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
@@ -181,12 +178,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
- 'armvariant:thumb2': {
- 'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
- },
- 'armvariant:arm': {
- 'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
- },
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
@@ -252,7 +243,6 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
- '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -665,11 +655,6 @@ SIMPLE_OPTIONS = {
'values': ['default', 'hidden'],
'default': 'hidden',
'help': 'shared library symbol visibility'
- },
- 'armvariant': {
- 'values': ['arm', 'thumb2', 'none'],
- 'default': 'none',
- 'help': 'generate thumb2 instructions instead of arm instructions (default)'
}
}
@@ -853,10 +838,6 @@ def PostprocessOptions(options):
# Print a warning if profiling is enabled without profiling support
print "Warning: forcing profilingsupport on when prof is on"
options['profilingsupport'] = 'on'
- if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
- options['armvariant'] = 'arm'
- if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
- options['armvariant'] = 'none'
def ParseEnvOverrides(arg, imports):
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 2e30992edd..a8ee8d4329 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -833,26 +833,13 @@ class V8EXPORT String : public Primitive {
* Returns true if the string is both external and ascii
*/
bool IsExternalAscii() const;
-
- class V8EXPORT ExternalStringResourceBase {
- public:
- virtual ~ExternalStringResourceBase() {}
- protected:
- ExternalStringResourceBase() {}
- private:
- // Disallow copying and assigning.
- ExternalStringResourceBase(const ExternalStringResourceBase&);
- void operator=(const ExternalStringResourceBase&);
- };
-
/**
* An ExternalStringResource is a wrapper around a two-byte string
* buffer that resides outside V8's heap. Implement an
* ExternalStringResource to manage the life cycle of the underlying
* buffer. Note that the string data must be immutable.
*/
- class V8EXPORT ExternalStringResource
- : public ExternalStringResourceBase {
+ class V8EXPORT ExternalStringResource { // NOLINT
public:
/**
* Override the destructor to manage the life cycle of the underlying
@@ -865,6 +852,10 @@ class V8EXPORT String : public Primitive {
virtual size_t length() const = 0;
protected:
ExternalStringResource() {}
+ private:
+ // Disallow copying and assigning.
+ ExternalStringResource(const ExternalStringResource&);
+ void operator=(const ExternalStringResource&);
};
/**
@@ -878,8 +869,7 @@ class V8EXPORT String : public Primitive {
* Use String::New or convert to 16 bit data for non-ASCII.
*/
- class V8EXPORT ExternalAsciiStringResource
- : public ExternalStringResourceBase {
+ class V8EXPORT ExternalAsciiStringResource { // NOLINT
public:
/**
* Override the destructor to manage the life cycle of the underlying
@@ -892,6 +882,10 @@ class V8EXPORT String : public Primitive {
virtual size_t length() const = 0;
protected:
ExternalAsciiStringResource() {}
+ private:
+ // Disallow copying and assigning.
+ ExternalAsciiStringResource(const ExternalAsciiStringResource&);
+ void operator=(const ExternalAsciiStringResource&);
};
/**
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 28996b05ee..3b0df17188 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -106,6 +106,7 @@ SOURCES = {
zone.cc
"""),
'arch:arm': Split("""
+ arm/assembler-arm.cc
arm/builtins-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
@@ -122,12 +123,6 @@ SOURCES = {
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
"""),
- 'armvariant:arm': Split("""
- arm/assembler-arm.cc
- """),
- 'armvariant:thumb2': Split("""
- arm/assembler-thumb2.cc
- """),
'arch:ia32': Split("""
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index d793b9f11c..93807a7c72 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -3082,13 +3082,81 @@ i::Handle<i::String> NewExternalAsciiStringHandle(
}
+static void DisposeExternalString(v8::Persistent<v8::Value> obj,
+ void* parameter) {
+ ENTER_V8;
+ i::ExternalTwoByteString* str =
+ i::ExternalTwoByteString::cast(*Utils::OpenHandle(*obj));
+
+ // External symbols are deleted when they are pruned out of the symbol
+ // table. Generally external symbols are not registered with the weak handle
+ // callbacks unless they are upgraded to a symbol after being externalized.
+ if (!str->IsSymbol()) {
+ v8::String::ExternalStringResource* resource =
+ reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
+ if (resource != NULL) {
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
+ i::Counters::total_external_string_memory.Decrement(total_size);
+
+ // The object will continue to live in the JavaScript heap until the
+ // handle is entirely cleaned out by the next GC. For example the
+      // destructor for the resource below could bring it back to life again,
+      // which is why we make sure not to have a dangling pointer here.
+ str->set_resource(NULL);
+ delete resource;
+ }
+ }
+
+ // In any case we do not need this handle any longer.
+ obj.Dispose();
+}
+
+
+static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
+ void* parameter) {
+ ENTER_V8;
+ i::ExternalAsciiString* str =
+ i::ExternalAsciiString::cast(*Utils::OpenHandle(*obj));
+
+ // External symbols are deleted when they are pruned out of the symbol
+ // table. Generally external symbols are not registered with the weak handle
+ // callbacks unless they are upgraded to a symbol after being externalized.
+ if (!str->IsSymbol()) {
+ v8::String::ExternalAsciiStringResource* resource =
+ reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
+ if (resource != NULL) {
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
+ i::Counters::total_external_string_memory.Decrement(total_size);
+
+ // The object will continue to live in the JavaScript heap until the
+ // handle is entirely cleaned out by the next GC. For example the
+      // destructor for the resource below could bring it back to life again,
+      // which is why we make sure not to have a dangling pointer here.
+ str->set_resource(NULL);
+ delete resource;
+ }
+ }
+
+ // In any case we do not need this handle any longer.
+ obj.Dispose();
+}
+
+
Local<String> v8::String::NewExternal(
v8::String::ExternalStringResource* resource) {
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
+ i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalStringHandle(resource);
- i::ExternalStringTable::AddString(*result);
+ i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
+ i::GlobalHandles::MakeWeak(handle.location(),
+ resource,
+ &DisposeExternalString);
return Utils::ToLocal(result);
}
@@ -3100,7 +3168,13 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- i::ExternalStringTable::AddString(*obj);
+ // Operation was successful and the string is not a symbol. In this case
+      // we need to make sure that we call the destructor for the external
+ // resource when no strong references to the string remain.
+ i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
+ i::GlobalHandles::MakeWeak(handle.location(),
+ resource,
+ &DisposeExternalString);
}
return result;
}
@@ -3111,8 +3185,14 @@ Local<String> v8::String::NewExternal(
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
+ i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
- i::ExternalStringTable::AddString(*result);
+ i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
+ i::GlobalHandles::MakeWeak(handle.location(),
+ resource,
+ &DisposeExternalAsciiString);
return Utils::ToLocal(result);
}
@@ -3125,7 +3205,13 @@ bool v8::String::MakeExternal(
i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- i::ExternalStringTable::AddString(*obj);
+ // Operation was successful and the string is not a symbol. In this case
+      // we need to make sure that we call the destructor for the external
+ // resource when no strong references to the string remain.
+ i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
+ i::GlobalHandles::MakeWeak(handle.location(),
+ resource,
+ &DisposeExternalAsciiString);
}
return result;
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 07da800903..d9247288ca 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -114,55 +114,55 @@ CRegister cr15 = { 15 };
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0 = { 0 };
-SwVfpRegister s1 = { 1 };
-SwVfpRegister s2 = { 2 };
-SwVfpRegister s3 = { 3 };
-SwVfpRegister s4 = { 4 };
-SwVfpRegister s5 = { 5 };
-SwVfpRegister s6 = { 6 };
-SwVfpRegister s7 = { 7 };
-SwVfpRegister s8 = { 8 };
-SwVfpRegister s9 = { 9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0 = { 0 };
-DwVfpRegister d1 = { 1 };
-DwVfpRegister d2 = { 2 };
-DwVfpRegister d3 = { 3 };
-DwVfpRegister d4 = { 4 };
-DwVfpRegister d5 = { 5 };
-DwVfpRegister d6 = { 6 };
-DwVfpRegister d7 = { 7 };
-DwVfpRegister d8 = { 8 };
-DwVfpRegister d9 = { 9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
+Register s0 = { 0 };
+Register s1 = { 1 };
+Register s2 = { 2 };
+Register s3 = { 3 };
+Register s4 = { 4 };
+Register s5 = { 5 };
+Register s6 = { 6 };
+Register s7 = { 7 };
+Register s8 = { 8 };
+Register s9 = { 9 };
+Register s10 = { 10 };
+Register s11 = { 11 };
+Register s12 = { 12 };
+Register s13 = { 13 };
+Register s14 = { 14 };
+Register s15 = { 15 };
+Register s16 = { 16 };
+Register s17 = { 17 };
+Register s18 = { 18 };
+Register s19 = { 19 };
+Register s20 = { 20 };
+Register s21 = { 21 };
+Register s22 = { 22 };
+Register s23 = { 23 };
+Register s24 = { 24 };
+Register s25 = { 25 };
+Register s26 = { 26 };
+Register s27 = { 27 };
+Register s28 = { 28 };
+Register s29 = { 29 };
+Register s30 = { 30 };
+Register s31 = { 31 };
+
+Register d0 = { 0 };
+Register d1 = { 1 };
+Register d2 = { 2 };
+Register d3 = { 3 };
+Register d4 = { 4 };
+Register d5 = { 5 };
+Register d6 = { 6 };
+Register d7 = { 7 };
+Register d8 = { 8 };
+Register d9 = { 9 };
+Register d10 = { 10 };
+Register d11 = { 11 };
+Register d12 = { 12 };
+Register d13 = { 13 };
+Register d14 = { 14 };
+Register d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -1371,10 +1371,11 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
+void Assembler::fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
// Dm = <Rt,Rt2>.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
@@ -1386,10 +1387,11 @@ void Assembler::vmov(const DwVfpRegister dst,
}
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
+void Assembler::fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
// <Rt,Rt2> = Dm.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
@@ -1401,8 +1403,9 @@ void Assembler::vmov(const Register dst1,
}
-void Assembler::vmov(const SwVfpRegister dst,
+void Assembler::fmsr(const Register dst,
const Register src,
+ const SBit s,
const Condition cond) {
// Sn = Rt.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -1415,8 +1418,9 @@ void Assembler::vmov(const SwVfpRegister dst,
}
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
+void Assembler::fmrs(const Register dst,
+ const Register src,
+ const SBit s,
const Condition cond) {
// Rt = Sn.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -1429,9 +1433,10 @@ void Assembler::vmov(const Register dst,
}
-void Assembler::vcvt(const DwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond) {
+void Assembler::fsitod(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
@@ -1443,9 +1448,10 @@ void Assembler::vcvt(const DwVfpRegister dst,
}
-void Assembler::vcvt(const SwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
+void Assembler::ftosid(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
// Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
@@ -1457,11 +1463,12 @@ void Assembler::vcvt(const SwVfpRegister dst,
}
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vadd(Dn, Dm) double precision floating point addition.
+void Assembler::faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = faddd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1472,11 +1479,12 @@ void Assembler::vadd(const DwVfpRegister dst,
}
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vsub(Dn, Dm) double precision floating point subtraction.
+void Assembler::fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1487,11 +1495,12 @@ void Assembler::vsub(const DwVfpRegister dst,
}
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vmul(Dn, Dm) double precision floating point multiplication.
+void Assembler::fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
@@ -1502,11 +1511,12 @@ void Assembler::vmul(const DwVfpRegister dst,
}
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vdiv(Dn, Dm) double precision floating point division.
+void Assembler::fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fdivd(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
@@ -1517,8 +1527,8 @@ void Assembler::vdiv(const DwVfpRegister dst,
}
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
+void Assembler::fcmp(const Register src1,
+ const Register src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index cd53dd6097..86bc18a247 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -103,94 +103,57 @@ extern Register sp;
extern Register lr;
extern Register pc;
-
-// Single word VFP register.
-struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- int code_;
-};
-
-
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
-
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
-
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+extern Register s0;
+extern Register s1;
+extern Register s2;
+extern Register s3;
+extern Register s4;
+extern Register s5;
+extern Register s6;
+extern Register s7;
+extern Register s8;
+extern Register s9;
+extern Register s10;
+extern Register s11;
+extern Register s12;
+extern Register s13;
+extern Register s14;
+extern Register s15;
+extern Register s16;
+extern Register s17;
+extern Register s18;
+extern Register s19;
+extern Register s20;
+extern Register s21;
+extern Register s22;
+extern Register s23;
+extern Register s24;
+extern Register s25;
+extern Register s26;
+extern Register s27;
+extern Register s28;
+extern Register s29;
+extern Register s30;
+extern Register s31;
+
+extern Register d0;
+extern Register d1;
+extern Register d2;
+extern Register d3;
+extern Register d4;
+extern Register d5;
+extern Register d6;
+extern Register d7;
+extern Register d8;
+extern Register d9;
+extern Register d10;
+extern Register d11;
+extern Register d12;
+extern Register d13;
+extern Register d14;
+extern Register d15;
// Coprocessor register
struct CRegister {
@@ -796,45 +759,55 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
+ void fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmsr(const Register dst,
const Register src,
+ const SBit s = LeaveCC,
const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt(const DwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt(const SwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
-
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
+ void fmrs(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
+ void fsitod(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void ftosid(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+
+ void faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fcmp(const Register src1,
+ const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,
diff --git a/deps/v8/src/arm/assembler-thumb2-inl.h b/deps/v8/src/arm/assembler-thumb2-inl.h
deleted file mode 100644
index 3808ef00fa..0000000000
--- a/deps/v8/src/arm/assembler-thumb2-inl.h
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
-#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
-
-#include "arm/assembler-thumb2.h"
-#include "cpu.h"
-
-
-namespace v8 {
-namespace internal {
-
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
-
-void RelocInfo::apply(intptr_t delta) {
- if (RelocInfo::IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // relocate entry
- }
- // We do not use pc relative addressing on ARM, so there is
- // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- // On ARM a "call instruction" is actually two instructions.
- // mov lr, pc
- // ldr pc, [pc, #XXX]
- return (Assembler::instr_at(pc_) == kMovLrPc)
- && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
- == kLdrPCPattern);
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const char* s) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = LSL;
- shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid() &&
- rs_.is(no_reg) &&
- shift_op_ == LSL &&
- shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
- if (pc_offset() >= next_buffer_check_) {
- CheckConstPool(false, true);
- }
-}
-
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_address_address_at(Address pc) {
- Instr instr = Memory::int32_at(pc);
- // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
- ASSERT((instr & 0x0f7f0000) == 0x051f0000);
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return pc + offset + 8;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_address_address_at(pc));
-}
-
-
-void Assembler::set_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_address_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to flush the instruction cache
- // after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction was actually patched by the assignment
- // above; the target address is not part of an instruction, it is patched in
- // the constant pool and is read via a data access; the instruction accessing
- // this address in the constant pool remains unchanged.
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_
diff --git a/deps/v8/src/arm/assembler-thumb2.cc b/deps/v8/src/arm/assembler-thumb2.cc
deleted file mode 100644
index 6c2b9032fa..0000000000
--- a/deps/v8/src/arm/assembler-thumb2.cc
+++ /dev/null
@@ -1,1821 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#include "arm/assembler-thumb2-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Safe default is no features.
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::enabled_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-void CpuFeatures::Probe() {
- // If the compiler is allowed to use vfp then we can use vfp too in our
- // code generation.
-#if !defined(__arm__)
- // For the simulator=arm build, always use VFP since the arm simulator has
- // VFP support.
- supported_ |= 1u << VFP3;
-#else
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- if (OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if
- // runtime detection of VFP returns true.
- supported_ |= 1u << VFP3;
- found_by_runtime_probing_ |= 1u << VFP3;
- }
-#endif
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and CRegister
-
-Register no_reg = { -1 };
-
-Register r0 = { 0 };
-Register r1 = { 1 };
-Register r2 = { 2 };
-Register r3 = { 3 };
-Register r4 = { 4 };
-Register r5 = { 5 };
-Register r6 = { 6 };
-Register r7 = { 7 };
-Register r8 = { 8 };
-Register r9 = { 9 };
-Register r10 = { 10 };
-Register fp = { 11 };
-Register ip = { 12 };
-Register sp = { 13 };
-Register lr = { 14 };
-Register pc = { 15 };
-
-
-CRegister no_creg = { -1 };
-
-CRegister cr0 = { 0 };
-CRegister cr1 = { 1 };
-CRegister cr2 = { 2 };
-CRegister cr3 = { 3 };
-CRegister cr4 = { 4 };
-CRegister cr5 = { 5 };
-CRegister cr6 = { 6 };
-CRegister cr7 = { 7 };
-CRegister cr8 = { 8 };
-CRegister cr9 = { 9 };
-CRegister cr10 = { 10 };
-CRegister cr11 = { 11 };
-CRegister cr12 = { 12 };
-CRegister cr13 = { 13 };
-CRegister cr14 = { 14 };
-CRegister cr15 = { 15 };
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0 = { 0 };
-SwVfpRegister s1 = { 1 };
-SwVfpRegister s2 = { 2 };
-SwVfpRegister s3 = { 3 };
-SwVfpRegister s4 = { 4 };
-SwVfpRegister s5 = { 5 };
-SwVfpRegister s6 = { 6 };
-SwVfpRegister s7 = { 7 };
-SwVfpRegister s8 = { 8 };
-SwVfpRegister s9 = { 9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0 = { 0 };
-DwVfpRegister d1 = { 1 };
-DwVfpRegister d2 = { 2 };
-DwVfpRegister d3 = { 3 };
-DwVfpRegister d4 = { 4 };
-DwVfpRegister d5 = { 5 };
-DwVfpRegister d6 = { 6 };
-DwVfpRegister d7 = { 7 };
-DwVfpRegister d8 = { 8 };
-DwVfpRegister d9 = { 9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-thumb2-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
- ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
- // encoded as ROR with shift_imm == 0
- ASSERT(shift_imm == 0);
- shift_op_ = ROR;
- shift_imm_ = 0;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
- ASSERT(shift_op != RRX);
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
- rn_ = rn;
- rm_ = no_reg;
- offset_ = offset;
- am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
- rn_ = rn;
- rm_ = rm;
- shift_op_ = LSL;
- shift_imm_ = 0;
- am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am) {
- ASSERT(is_uint5(shift_imm));
- rn_ = rn;
- rm_ = rm;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler
-
-// Instruction encoding bits
-enum {
- H = 1 << 5, // halfword (or byte)
- S6 = 1 << 6, // signed (or unsigned)
- L = 1 << 20, // load (or store)
- S = 1 << 20, // set condition code (or leave unchanged)
- W = 1 << 21, // writeback base register (or leave unchanged)
- A = 1 << 21, // accumulate in multiply instruction (or not)
- B = 1 << 22, // unsigned byte (or word)
- N = 1 << 22, // long (or short)
- U = 1 << 23, // positive (or negative) offset/index
- P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
- I = 1 << 25, // immediate shifter operand (or not)
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
-
- // Instruction bit masks
- RdMask = 15 << 12, // in str instruction
- CondMask = 15 << 28,
- CoprocessorMask = 15 << 8,
- OpCodeMask = 15 << 21, // in data-processing instructions
- Imm24Mask = (1 << 24) - 1,
- Off12Mask = (1 << 12) - 1,
- // Reserved condition
- nv = 15 << 28
-};
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
- al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-static const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | sp.code() * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-static const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | sp.code() * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
-// ldr pc, [pc, #XXX]
-const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-
-// spare_buffer_
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
- if (buffer == NULL) {
- // do our own buffer management
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // use externally provided buffer instead
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // setup buffer pointers
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_prinfo_ = 0;
- next_buffer_check_ = 0;
- no_const_pool_before_ = 0;
- last_const_pool_end_ = 0;
- last_bound_pos_ = 0;
- current_statement_position_ = RelocInfo::kNoPosition;
- current_position_ = RelocInfo::kNoPosition;
- written_statement_position_ = current_statement_position_;
- written_position_ = current_position_;
-}
-
-
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // emit constant pool if necessary
- CheckConstPool(true, false);
- ASSERT(num_prinfo_ == 0);
-
- // setup desc
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
-
-int Assembler::target_at(int pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
- }
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & Imm24Mask) << 8) >> 6;
- if ((instr & CondMask) == nv && (instr & B24) != 0)
- // blx uses bit 24 to encode bit 2 of imm26
- imm26 += 2;
-
- return pos + kPcLoadDelta + imm26;
-}
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
- int imm26 = target_pos - (pos + kPcLoadDelta);
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if ((instr & CondMask) == nv) {
- // blx uses bit 24 to encode bit 2 of imm26
- ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
- } else {
- ASSERT((imm26 & 3) == 0);
- instr &= ~Imm24Mask;
- }
- int imm24 = imm26 >> 2;
- ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~Imm24Mask) == 0) {
- PrintF("value\n");
- } else {
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- int cond = instr & CondMask;
- const char* b;
- const char* c;
- if (cond == nv) {
- b = "blx";
- c = "";
- } else {
- if ((instr & B24) != 0)
- b = "bl";
- else
- b = "b";
-
- switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
- default:
- c = "";
- UNREACHABLE();
- }
- }
- PrintF("%s%s\n", b, c);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- int fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // append appendix to L's list
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- if (link > 0) {
- L->link_to(link);
- } else {
- ASSERT(link == kEndOfChain);
- L->Unuse();
- }
-}
-
-
-// Low-level code emission routines depending on the addressing mode
-static bool fits_shifter(uint32_t imm32,
- uint32_t* rotate_imm,
- uint32_t* immed_8,
- Instr* instr) {
- // imm32 must be unsigned
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
- if ((imm8 <= 0xff)) {
- *rotate_imm = rot;
- *immed_8 = imm8;
- return true;
- }
- }
- // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
- if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= 0x2*B21;
- return true;
- }
- }
- return false;
-}
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- return Serializer::enabled();
- } else if (rmode == RelocInfo::NONE) {
- return false;
- }
- return true;
-}
-
-
-void Assembler::addrmod1(Instr instr,
- Register rn,
- Register rd,
- const Operand& x) {
- CheckBuffer();
- ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
- if (!x.rm_.is_valid()) {
- // immediate
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (MustUseIp(x.rmode_) ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'
- RecordRelocInfo(x.rmode_, x.imm32_);
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = static_cast<Condition>(instr & CondMask);
- if ((instr & ~CondMask) == 13*B21) { // mov, S not set
- ldr(rd, MemOperand(pc, 0), cond);
- } else {
- ldr(ip, MemOperand(pc, 0), cond);
- addrmod1(instr, rn, rd, Operand(ip));
- }
- return;
- }
- instr |= I | rotate_imm*B8 | immed_8;
- } else if (!x.rs_.is_valid()) {
- // immediate shift
- instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- } else {
- // register shift
- ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
- instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
- }
- emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc))
- // block constant pool emission for one instruction after reading pc
- BlockConstPoolBefore(pc_offset() + kInstrSize);
-}
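-
-// For example, mov(r0, Operand(0x12345678)) cannot be encoded as a
-// rotated 8-bit immediate, so addrmod1 rewrites it as a pc-relative
-// ldr r0, [pc, #offset] whose constant ends up in the constant pool.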
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | B | L)) == B26);
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // immediate offset
- int offset_12 = x.offset_;
- if (offset_12 < 0) {
- offset_12 = -offset_12;
- am ^= U;
- }
- if (!is_uint12(offset_12)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
- addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_12 >= 0); // no masking needed
- instr |= offset_12;
- } else {
- // register offset (shift_imm_ and shift_op_ are 0) or scaled register
- // offset; the constructors make sure that both shift_imm_ and shift_op_
- // are initialized
- ASSERT(!x.rm_.is(pc));
- instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
- ASSERT(x.rn_.is_valid());
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // immediate offset
- int offset_8 = x.offset_;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- if (!is_uint8(offset_8)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_8 >= 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
- } else if (x.shift_imm_ != 0) {
- // scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- } else {
- // register offset
- ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
- instr |= x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
- ASSERT(rl != 0);
- ASSERT(!rn.is(pc));
- emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // unindexed addressing is not encoded by this function
- ASSERT_EQ((B27 | B26),
- (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
- ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
- int am = x.am_;
- int offset_8 = x.offset_;
- ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
- offset_8 >>= 2;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
-
- // post-indexed addressing requires W == 1; this differs from addrmod2/3
- if ((am & P) == 0)
- am |= W;
-
- ASSERT(offset_8 >= 0); // no masking needed
- emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(pc_offset());
- }
-
- // Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- return target_pos - (pc_offset() + kPcLoadDelta);
-}
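-
-// Note that kPcLoadDelta (8) reflects the ARM pipeline: reading pc from an
-// instruction yields that instruction's address plus 8, so branch offsets
-// are relative to pc + 8 rather than to the branch instruction itself.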
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
-// Branch instructions
-void Assembler::b(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & Imm24Mask));
-
- if (cond == al)
- // dead code is a good location to emit the constant pool
- CheckConstPool(false, false);
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) { // v5 and above
- WriteRecordedPositions();
- ASSERT((branch_offset & 1) == 0);
- int h = ((branch_offset & 2) >> 1)*B24;
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
-}
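-
-// The immediate blx accepts halfword-aligned targets: bit 1 of the offset
-// travels in the H bit (B24), which is what permits calls into Thumb code.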
-
-
-void Assembler::blx(Register target, Condition cond) { // v5 and above
- WriteRecordedPositions();
- ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
- WriteRecordedPositions();
- ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
-}
-
-
-// Data-processing instructions
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 0*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 1*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 2*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 3*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 4*B21 | s, src1, dst, src2);
-
- // Eliminate pattern: push(r), pop()
- // str(src, MemOperand(sp, 4, NegPreIndex), al);
- // add(sp, sp, Operand(kPointerSize));
- // Both instructions can be eliminated.
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
- instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 5*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 6*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 7*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 8*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 9*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 10*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 11*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 12*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
- if (dst.is(pc)) {
- WriteRecordedPositions();
- }
- addrmod1(cond | 13*B21 | s, r0, dst, src);
-}
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | 14*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | 15*B21 | s, r0, dst, src);
-}
-
-
-// Multiply instructions
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- // dst goes in bits 16-19 for this instruction!
- emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions
-void Assembler::clz(Register dst, Register src, Condition cond) {
- // v5 and above.
- ASSERT(!dst.is(pc) && !src.is(pc));
- emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | B4 | src.code());
-}
-
-
-// Status register access instructions
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- ASSERT(!dst.is(pc));
- emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
- Condition cond) {
- ASSERT(fields >= B16 && fields < B20); // at least one field set
- Instr instr;
- if (!src.rm_.is_valid()) {
- // immediate
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (MustUseIp(src.rmode_) ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // immediate operand cannot be encoded, load it first to register ip
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- msr(fields, Operand(ip), cond);
- return;
- }
- instr = I | rotate_imm*B8 | immed_8;
- } else {
- ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
- instr = src.rm_.code();
- }
- emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- if (dst.is(pc)) {
- WriteRecordedPositions();
- }
- addrmod2(cond | B26 | L, dst, src);
-
- // Eliminate pattern: push(r), pop(r)
- // str(r, MemOperand(sp, 4, NegPreIndex), al)
- // ldr(r, MemOperand(sp, 4, PostIndex), al)
- // Both instructions can be eliminated.
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
- instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26, src, dst);
-
- // Eliminate pattern: pop(), push(r)
- // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
- // -> str r, [sp, 0], al
- int pattern_size = 2 * kInstrSize;
- if (FLAG_push_pop_elimination &&
- last_bound_pos_ <= (pc_offset() - pattern_size) &&
- reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
- pc_ -= 2 * kInstrSize;
- emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
- if (FLAG_print_push_pop_elimination) {
- PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
- }
- }
-}
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
- addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-// Load/Store multiple instructions
-void Assembler::ldm(BlockAddrMode am,
- Register base,
- RegList dst,
- Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable
- ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
- addrmod4(cond | B27 | am | L, base, dst);
-
- // emit the constant pool after a function return implemented by ldm ..{..pc}
- if (cond == al && (dst & pc.bit()) != 0) {
- // There is a slight chance that the ldm instruction was actually a call,
- // in which case it would be wrong to return into the constant pool; we
- // recognize this case by checking if the emission of the pool was blocked
- // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
- // the case, we emit a jump over the pool.
- CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
- }
-}
-
-
-void Assembler::stm(BlockAddrMode am,
- Register base,
- RegList src,
- Condition cond) {
- addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Semaphore instructions
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
- ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
- ASSERT(!dst.is(base) && !src.is(base));
- emit(cond | P | base.code()*B16 | dst.code()*B12 |
- B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
- Register src,
- Register base,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
- ASSERT(!dst.is(base) && !src.is(base));
- emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
- B7 | B4 | src.code());
-}
-
-
-// Exception-generating instructions and debugging support
-void Assembler::stop(const char* msg) {
-#if !defined(__arm__)
- // The simulator handles these special instructions and stops execution.
- emit(15 << 28 | ((intptr_t) msg));
-#else
- // Just issue a simple break instruction for now. Alternatively we could use
- // the swi(0x9f0001) instruction on Linux.
- bkpt(0);
-#endif
-}
-
-
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
- ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
-}
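-
-// The bkpt encoding splits imm16 across the instruction: the high twelve
-// bits go to bits 8-19 and the low four bits to bits 0-3, with 0111 in
-// bits 4-7.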
-
-
-void Assembler::swi(uint32_t imm24, Condition cond) {
- ASSERT(is_uint24(imm24));
- emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions
-void Assembler::cdp(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
- crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // unindexed addressing
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // unindexed addressing
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-// Support for VFP.
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
- // Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src1.is(pc) && !src2.is(pc));
- emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
-}
-
-
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
- // <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst1.is(pc) && !dst2.is(pc));
- emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond) {
- // Sn = Rt.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src.is(pc));
- emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
- src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Rt = Sn.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst.is(pc));
- emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
- dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
-}
-
-
-void Assembler::vcvt(const DwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
- // Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
- dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
- (0x1 & src.code())*B5 | (src.code() >> 1));
-}
-
-
-void Assembler::vcvt(const SwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
- // Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
- // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 |
- 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
- 0x5*B9 | B8 | B7 | B6 | src.code());
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vadd(Dn, Dm) double precision floating point addition.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vsub(Dn, Dm) double precision floating point subtraction.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vmul(Dn, Dm) double precision floating point multiplication.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vdiv(Dn, Dm) double precision floating point division.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const SBit s,
- const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xF*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-// Pseudo instructions
-void Assembler::lea(Register dst,
- const MemOperand& x,
- SBit s,
- Condition cond) {
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // immediate offset
- if ((am & P) == 0) // post indexing
- mov(dst, Operand(x.rn_), s, cond);
- else if ((am & U) == 0) // negative indexing
- sub(dst, x.rn_, Operand(x.offset_), s, cond);
- else
- add(dst, x.rn_, Operand(x.offset_), s, cond);
- } else {
- // Register offset (shift_imm_ and shift_op_ are 0) or scaled
- // register offset; the constructors make sure that both shift_imm_
- // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- if ((am & P) == 0) // post indexing
- mov(dst, Operand(x.rn_), s, cond);
- else if ((am & U) == 0) // negative indexing
- sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
- else
- add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
- }
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
- uint32_t dummy1;
- uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
- BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-// Debugging
-void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_debug_code) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::RecordPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
-}
-
-
-void Assembler::WriteRecordedPositions() {
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- }
-}
-
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // compute new buffer size
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
- CHECK_GT(desc.buffer_size, 0); // no overflow
-
- // setup new buffer
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // copy the data
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // switch buffers
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // none of our relocation types are pc relative pointing outside the code
- // buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries
-
- // relocate pending relocation entries
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
- // Adjust code for new modes
- ASSERT(RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
- // these modes do not need an entry in the constant pool
- } else {
- ASSERT(num_prinfo_ < kMaxNumPRInfo);
- prinfo_[num_prinfo_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info
- BlockConstPoolBefore(pc_offset() + kInstrSize);
- }
- if (rinfo.rmode() != RelocInfo::NONE) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
- }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Calculate the offset of the next check. It will be overwritten
- // when a const pool is generated or when const pools are being
- // blocked for a specific range.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- // There is nothing to do if there are no pending relocation info entries
- if (num_prinfo_ == 0) return;
-
- // We emit a constant pool at regular intervals of about kDistBetweenPools
- // or when requested by parameter force_emit (e.g. after each function).
- // We prefer not to emit a jump unless the max distance is reached or if we
- // are running low on slots, which can happen if a lot of constants are being
- // emitted (e.g. --debug-code and many static references).
- int dist = pc_offset() - last_const_pool_end_;
- if (!force_emit && dist < kMaxDistBetweenPools &&
- (require_jump || dist < kDistBetweenPools) &&
- // TODO(1236125): Cleanup the "magic" number below. We know that
- // the code generation will test every kCheckConstIntervalInst.
- // Thus we are safe as long as we generate less than 7 constant
- // entries per instruction.
- (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
- return;
- }
-
- // If we did not return by now, we need to emit the constant pool soon.
-
- // However, some small sequences of instructions must not be broken up by the
- // insertion of a constant pool; such sequences are protected by setting
- // no_const_pool_before_, which is checked here. Also, recursive calls to
- // CheckConstPool are blocked by no_const_pool_before_.
- if (pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as possible
- next_buffer_check_ = no_const_pool_before_;
-
- // Something is wrong if emission is forced and blocked at the same time
- ASSERT(!force_emit);
- return;
- }
-
- int jump_instr = require_jump ? kInstrSize : 0;
-
- // Check that the code buffer is large enough before emitting the constant
- // pool and relocation information (include the jump over the pool and the
- // constant pool marker).
- int max_needed_space =
- jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
- while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
- // Block recursive calls to CheckConstPool
- BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
- num_prinfo_*kInstrSize);
- // Don't bother to check for the emit calls below.
- next_buffer_check_ = no_const_pool_before_;
-
- // Emit jump over constant pool if necessary
- Label after_pool;
- if (require_jump) b(&after_pool);
-
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker
- // "Undefined instruction" as specified by A3.1 Instruction set encoding
- emit(0x03000000 | num_prinfo_);
-
- // Emit constant pool entries
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
- Instr instr = instr_at(rinfo.pc());
-
- // Instruction to patch must be a ldr/str [pc, #offset]
- // P and U set, B and W clear, Rn == pc, offset12 still 0
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
- (2*B25 | P | U | pc.code()*B16));
- int delta = pc_ - rinfo.pc() - 8;
- ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
- if (delta < 0) {
- instr &= ~U;
- delta = -delta;
- }
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), instr + delta);
- emit(rinfo.data());
- }
- num_prinfo_ = 0;
- last_const_pool_end_ = pc_offset();
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
-
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-}
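-
-// Sketch of the emitted pool layout:
-//   b after_pool              ; only if require_jump
-//   <marker>                  ; 0x03000000 | num_prinfo_
-//   <num_prinfo_ constant words>
-// after_pool: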
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/arm/assembler-thumb2.h b/deps/v8/src/arm/assembler-thumb2.h
deleted file mode 100644
index 31e9487266..0000000000
--- a/deps/v8/src/arm/assembler-thumb2.h
+++ /dev/null
@@ -1,1027 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
-#define V8_ARM_ASSEMBLER_THUMB2_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-// Core register
-struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // (unfortunately we can't make this private in a struct)
- int code_;
-};
-
-
-extern Register no_reg;
-extern Register r0;
-extern Register r1;
-extern Register r2;
-extern Register r3;
-extern Register r4;
-extern Register r5;
-extern Register r6;
-extern Register r7;
-extern Register r8;
-extern Register r9;
-extern Register r10;
-extern Register fp;
-extern Register ip;
-extern Register sp;
-extern Register lr;
-extern Register pc;
-
-
-// Single word VFP register.
-struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- int code_;
-};
-
-
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
-
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
-
-
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // (unfortunately we can't make this private in a struct)
- int code_;
-};
-
-
-extern CRegister no_creg;
-extern CRegister cr0;
-extern CRegister cr1;
-extern CRegister cr2;
-extern CRegister cr3;
-extern CRegister cr4;
-extern CRegister cr5;
-extern CRegister cr6;
-extern CRegister cr7;
-extern CRegister cr8;
-extern CRegister cr9;
-extern CRegister cr10;
-extern CRegister cr11;
-extern CRegister cr12;
-extern CRegister cr13;
-extern CRegister cr14;
-extern CRegister cr15;
-
-
-// Coprocessor number
-enum Coprocessor {
- p0 = 0,
- p1 = 1,
- p2 = 2,
- p3 = 3,
- p4 = 4,
- p5 = 5,
- p6 = 6,
- p7 = 7,
- p8 = 8,
- p9 = 9,
- p10 = 10,
- p11 = 11,
- p12 = 12,
- p13 = 13,
- p14 = 14,
- p15 = 15
-};
-
-
-// Condition field in instructions
-enum Condition {
- eq = 0 << 28, // Z set equal.
- ne = 1 << 28, // Z clear not equal.
- nz = 1 << 28, // Z clear not zero.
- cs = 2 << 28, // C set carry set.
- hs = 2 << 28, // C set unsigned higher or same.
- cc = 3 << 28, // C clear carry clear.
- lo = 3 << 28, // C clear unsigned lower.
- mi = 4 << 28, // N set negative.
- pl = 5 << 28, // N clear positive or zero.
- vs = 6 << 28, // V set overflow.
- vc = 7 << 28, // V clear no overflow.
- hi = 8 << 28, // C set, Z clear unsigned higher.
- ls = 9 << 28, // C clear or Z set unsigned lower or same.
- ge = 10 << 28, // N == V greater or equal.
- lt = 11 << 28, // N != V less than.
- gt = 12 << 28, // Z clear, N == V greater than.
- le = 13 << 28, // Z set or N != V less than or equal.
- al = 14 << 28 // always.
-};
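-
-// Example: a compare sets the flags and a conditional branch selects on
-// them, e.g. cmp(r0, Operand(0)); b(eq, &is_zero);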
-
-
-// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cc;
- };
-}
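-
-// E.g. cmp(r0, Operand(r1)) followed by b(lt, L) is equivalent to
-// cmp(r1, Operand(r0)) followed by b(ReverseCondition(lt), L),
-// i.e. b(gt, L).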
-
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the ARM. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
- LSL = 0 << 5,
- LSR = 1 << 5,
- ASR = 2 << 5,
- ROR = 3 << 5,
- RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
- SetCC = 1 << 20, // set condition code
- LeaveCC = 0 << 20 // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
- // bit encoding P U W
- Offset = (8|4|0) << 21, // offset (without writeback to base)
- PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
- PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
- NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
- NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
- NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
-};
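-
-// Example: str(r0, MemOperand(sp, 4, NegPreIndex)) is a push and
-// ldr(r0, MemOperand(sp, 4, PostIndex)) is the matching pop; these are
-// exactly the patterns targeted by the assembler's push/pop elimination.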
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
- // bit encoding P U W
- da = (0|0|0) << 21, // decrement after
- ia = (0|4|0) << 21, // increment after
- db = (8|0|0) << 21, // decrement before
- ib = (8|4|0) << 21, // increment before
- da_w = (0|0|1) << 21, // decrement after with writeback to base
- ia_w = (0|4|1) << 21, // increment after with writeback to base
- db_w = (8|0|1) << 21, // decrement before with writeback to base
- ib_w = (8|4|1) << 21 // increment before with writeback to base
-};
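-
-// Example: stm(db_w, sp, r4.bit() | lr.bit()) pushes r4 and lr;
-// ldm(ia_w, sp, r4.bit() | pc.bit()) pops them, returning through pc.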
-
-
-// Coprocessor load/store operand size
-enum LFlag {
- Long = 1 << 22, // long load/store coprocessor
- Short = 0 << 22 // short load/store coprocessor
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
- // immediate
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
- INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // rm
- INLINE(explicit Operand(Register rm));
-
- // rm <shift_op> shift_imm
- explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
- // rm <shift_op> rs
- explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
- Register rm() const { return rm_; }
-
- private:
- Register rm_;
- Register rs_;
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- int32_t imm32_; // valid if rm_ == no_reg
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
-};
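-
-// Example operands: Operand(4) is an immediate, Operand(r1) a plain
-// register, and Operand(r1, LSL, 2) the register value shifted left by
-// two bits (i.e. r1 * 4).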
-
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
- // [rn +/- offset] Offset/NegOffset
- // [rn +/- offset]! PreIndex/NegPreIndex
- // [rn], +/- offset PostIndex/NegPostIndex
- // offset is any signed 32-bit value; offset is first loaded to register ip if
- // it does not fit the addressing mode (12-bit unsigned and sign bit)
- explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
- // [rn +/- rm] Offset/NegOffset
- // [rn +/- rm]! PreIndex/NegPreIndex
- // [rn], +/- rm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
- // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
- // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
- // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
-
- private:
- Register rn_; // base
- Register rm_; // register offset
- int32_t offset_; // valid if rm_ == no_reg
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- AddrMode am_; // bits P, U, and W
-
- friend class Assembler;
-};
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- return (enabled_ & (1u << f)) != 0;
- }
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- explicit Scope(CpuFeature f) {
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= 1u << f;
- }
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
- private:
- unsigned old_enabled_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- private:
- static unsigned supported_;
- static unsigned enabled_;
- static unsigned found_by_runtime_probing_;
-};
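-
-// Usage sketch:
-//   if (CpuFeatures::IsSupported(VFP3)) {
-//     CpuFeatures::Scope scope(VFP3);
-//     // VFP3 instructions such as vadd may be emitted here.
-//   }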
-
-
-typedef int32_t Instr;
-
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCPattern;
-
-
-class Assembler : public Malloced {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
- ~Assembler();
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Returns the branch offset to the given label from the current code
- // position. Links the label to the current position if it is still unbound.
- // Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
-
- // Puts a label's target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- INLINE(static Address target_address_address_at(Address pc));
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address constant_pool_entry, Address target);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target) {
- set_target_at(constant_pool_entry, target);
- }
-
- // Here we are patching the address in the constant pool, not the actual call
- // instruction. The address in the constant pool is the same size as a
- // pointer.
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Distance between the instruction referring to the address of the call
- // target (ldr pc, [target addr in const pool]) and the return address
- static const int kCallTargetAddressOffset = kInstrSize;
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-
- // Difference between address of current opcode and value read from pc
- // register.
- static const int kPcLoadDelta = 8;
-
- static const int kJSReturnSequenceLength = 4;
-
- // ---------------------------------------------------------------------------
- // Code generation
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
-
- // Branch instructions
- void b(int branch_offset, Condition cond = al);
- void bl(int branch_offset, Condition cond = al);
- void blx(int branch_offset); // v5 and above
- void blx(Register target, Condition cond = al); // v5 and above
- void bx(Register target, Condition cond = al); // v5 and above, plus v4t
-
- // Convenience branch instructions using labels
- void b(Label* L, Condition cond = al) {
- b(branch_offset(L, cond == al), cond);
- }
- void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
- void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
- void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
- void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
-
- // Data-processing instructions
- void and_(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void eor(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sub(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void sub(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- sub(dst, src1, Operand(src2), s, cond);
- }
-
- void rsb(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void add(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void adc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sbc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void rsc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void tst(Register src1, const Operand& src2, Condition cond = al);
- void tst(Register src1, Register src2, Condition cond = al) {
- tst(src1, Operand(src2), cond);
- }
-
- void teq(Register src1, const Operand& src2, Condition cond = al);
-
- void cmp(Register src1, const Operand& src2, Condition cond = al);
- void cmp(Register src1, Register src2, Condition cond = al) {
- cmp(src1, Operand(src2), cond);
- }
-
- void cmn(Register src1, const Operand& src2, Condition cond = al);
-
- void orr(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void orr(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- orr(dst, src1, Operand(src2), s, cond);
- }
-
- void mov(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
- void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
- mov(dst, Operand(src), s, cond);
- }
-
- void bic(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void mvn(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
-
- // Multiply instructions
-
- void mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s = LeaveCC, Condition cond = al);
-
- void mul(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- // Miscellaneous arithmetic instructions
-
- void clz(Register dst, Register src, Condition cond = al); // v5 and above
-
- // Status register access instructions
-
- void mrs(Register dst, SRegister s, Condition cond = al);
- void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
- // Load/Store instructions
- void ldr(Register dst, const MemOperand& src, Condition cond = al);
- void str(Register src, const MemOperand& dst, Condition cond = al);
- void ldrb(Register dst, const MemOperand& src, Condition cond = al);
- void strb(Register src, const MemOperand& dst, Condition cond = al);
- void ldrh(Register dst, const MemOperand& src, Condition cond = al);
- void strh(Register src, const MemOperand& dst, Condition cond = al);
- void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
- void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-
- // Load/Store multiple instructions
- void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
- void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
-
- // Semaphore instructions
- void swp(Register dst, Register src, Register base, Condition cond = al);
- void swpb(Register dst, Register src, Register base, Condition cond = al);
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg);
-
- void bkpt(uint32_t imm16); // v5 and above
- void swi(uint32_t imm24, Condition cond = al);
-
- // Coprocessor instructions
-
- void cdp(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2, Condition cond = al);
-
- void cdp2(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2); // v5 and above
-
- void mcr(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mcr2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void mrc(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mrc2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short, Condition cond = al);
- void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short); // v5 and above
- void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short, Condition cond = al);
- void stc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short); // v5 and above
- void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- // Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e., D16 to D31.
- // However, with some simple modifications they could be extended to do so.
-
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt(const DwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt(const SwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
-
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void vmrs(const Register dst,
- const Condition cond = al);
-
- // Pseudo instructions
- void nop() { mov(r0, Operand(r0)); }
-
- void push(Register src, Condition cond = al) {
- str(src, MemOperand(sp, 4, NegPreIndex), cond);
- }
-
- void pop(Register dst, Condition cond = al) {
- ldr(dst, MemOperand(sp, 4, PostIndex), cond);
- }
-
- void pop() {
- add(sp, sp, Operand(kPointerSize));
- }
-
- // Load effective address of memory operand x into register dst
- void lea(Register dst, const MemOperand& x,
- SBit s = LeaveCC, Condition cond = al);
-
- // Jump unconditionally to given label.
- void jmp(Label* L) { b(L, al); }
-
- // Returns the number of instructions generated from the given label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
- }
-
- // Check whether an immediate fits an addressing mode 1 instruction.
- bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Debugging
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --debug_code to enable.
- void RecordComment(const char* msg);
-
- void RecordPosition(int pos);
- void RecordStatementPosition(int pos);
- void WriteRecordedPositions();
-
- int pc_offset() const { return pc_ - buffer_; }
- int current_position() const { return current_position_; }
- int current_statement_position() const { return current_statement_position_; }
-
- protected:
- int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Read/patch instructions
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
- // Decode branch instruction at pos and return branch target pos
- int target_at(int pos);
-
- // Patch branch instruction at pos to branch to given branch target pos
- void target_at_put(int pos, int target_pos);
-
- // Check if it is time to emit a constant pool for pending reloc info entries
- void CheckConstPool(bool force_emit, bool require_jump);
-
- // Block the emission of the constant pool before pc_offset
- void BlockConstPoolBefore(int pc_offset) {
- if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
- }
-
- private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
- int next_buffer_check_; // pc offset of next buffer check
-
- // Code generation
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
- byte* pc_; // the program counter; moves forward
-
- // Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
- // Constants in the pool may be addresses of functions that get relocated;
- // if so, a relocation info entry is associated with the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
-
- // Pools are emitted after function return and in dead code at (more or less)
- // regular intervals of kDistBetweenPools bytes
- static const int kDistBetweenPools = 1*KB;
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-
- // Emission of the constant pool may be blocked in some code sequences
- int no_const_pool_before_; // block emission before this pc offset
-
- // Keep track of the last emitted pool to guarantee a maximal distance
- int last_const_pool_end_; // pc offset following the last constant pool
-
- // Relocation info generation
- // Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
- RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
- int num_prinfo_; // number of pending reloc info entries in the buffer
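-
- // Putting the constants above together (with KB == 1024 and kInstrSize == 4):
- //   kBufferCheckInterval = 1*KB/2            = 512 bytes
- //   kMaxDistBetweenPools = 4*KB - 2*512      = 3072 bytes
- //   kMaxNumPRInfo        = 3072 / kInstrSize = 768 pending entries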
-
- // The last bound position; before it we cannot do instruction elimination.
- int last_bound_pos_;
-
- // source position information
- int current_position_;
- int current_statement_position_;
- int written_position_;
- int written_statement_position_;
-
- // Code emission
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
-
- // Instruction generation
- void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
- void addrmod2(Instr instr, Register rd, const MemOperand& x);
- void addrmod3(Instr instr, Register rd, const MemOperand& x);
- void addrmod4(Instr instr, Register rn, RegList rl);
- void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
- // Labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
- void next(Label* L);
-
- // Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class RegExpMacroAssemblerARM;
- friend class RelocInfo;
- friend class CodePatcher;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_THUMB2_H_
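For orientation, here is a minimal sketch of how generated code drives the assembler interface declared above. It assumes the in-tree V8 internal headers (EmitCountdown is a hypothetical helper, not part of the tree) and would normally run inside a code generator rather than standalone:

    // Emits a small countdown loop using the label/branch API documented above:
    // a label is bound exactly once and may be branched to before or after.
    void EmitCountdown(v8::internal::Assembler* assm) {
      using namespace v8::internal;
      Label loop;
      assm->mov(r0, Operand(10));            // r0 = 10
      assm->bind(&loop);                     // bind label to the current pc
      assm->sub(r0, r0, Operand(1), SetCC);  // r0 -= 1, setting condition flags
      assm->b(&loop, ne);                    // backward branch while r0 != 0
      CodeDesc desc;
      assm->GetCode(&desc);                  // idempotent, per the comment above
    }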
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 89d974c73d..ea3df6cfbe 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1769,7 +1769,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
primitive.Bind();
frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
+ Result arg_count(r0);
+ __ mov(r0, Operand(0));
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
@@ -1908,7 +1910,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
+ Result arg_count_reg(r0);
+ __ mov(r0, Operand(1));
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@@ -3656,7 +3660,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@@ -3664,7 +3670,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@@ -3676,7 +3684,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@@ -3726,7 +3736,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Branch(eq);
frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
+ Result arg_count(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
continue_label.Jump();
smi_label.Bind();
@@ -3748,7 +3760,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
+ Result arg_count(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
continue_label.Bind();
break;
}
@@ -3833,7 +3847,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
+ Result arg_count(r0);
+ __ mov(r0, Operand(0));
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@@ -4219,7 +4235,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
frame_->EmitPush(r0);
break;
}
@@ -5061,10 +5079,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
- __ vcmp(d6, d7);
+ __ fcmp(d6, d7);
__ vmrs(pc);
__ mov(r0, Operand(0), LeaveCC, eq);
__ mov(r0, Operand(1), LeaveCC, lt);
@@ -5127,6 +5145,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
+ __ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@@ -5225,6 +5244,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Only first argument is a string.
__ bind(&string1);
+ __ mov(r0, Operand(2)); // Set number of arguments.
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
// First argument was not a string, test second.
@@ -5236,11 +5256,13 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Only second argument is a string.
__ b(&not_strings);
+ __ mov(r0, Operand(2)); // Set number of arguments.
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
__ bind(&not_strings);
}
+ __ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
@@ -5331,22 +5353,22 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
if (Token::MUL == operation) {
- __ vmul(d5, d6, d7);
+ __ fmuld(d5, d6, d7);
} else if (Token::DIV == operation) {
- __ vdiv(d5, d6, d7);
+ __ fdivd(d5, d6, d7);
} else if (Token::ADD == operation) {
- __ vadd(d5, d6, d7);
+ __ faddd(d5, d6, d7);
} else if (Token::SUB == operation) {
- __ vsub(d5, d6, d7);
+ __ fsubd(d5, d6, d7);
} else {
UNREACHABLE();
}
- __ vmov(r0, r1, d5);
+ __ fmrrd(r0, r1, d5);
__ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
@@ -5435,9 +5457,9 @@ static void GetInt32(MacroAssembler* masm,
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- __ vmov(d7, scratch2, scratch);
- __ vcvt(s15, d7);
- __ vmov(dest, s15);
+ __ fmdrr(d7, scratch2, scratch);
+ __ ftosid(s15, d7);
+ __ fmrs(dest, s15);
} else {
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
@@ -5576,6 +5598,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
+ __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5680,29 +5703,6 @@ static void MultiplyByKnownInt2(
}
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s",
- op_name,
- overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : "");
- return name_;
-}
-
-
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -5980,6 +5980,7 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
// Enter runtime system.
__ bind(&slow);
__ push(r0);
+ __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(&not_smi);
@@ -6455,6 +6456,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Slow-case. Tail call builtin.
__ bind(&slow);
+ __ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
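Every call site touched above follows the same restored convention: the builtin expects the argument count, not counting the receiver, in r0, and the virtual frame is told that r0 is live by wrapping it in a Result. Reduced to its essentials, the repeated pattern is:

    Result arg_count(r0);    // tell the virtual frame that r0 is in use
    __ mov(r0, Operand(1));  // argument count, not counting the receiver
    frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);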
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index e9f11e9c6e..ba7f93626d 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -455,15 +455,13 @@ class GenericBinaryOpStub : public CodeStub {
: op_(op),
mode_(mode),
constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- name_(NULL) { }
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
private:
Token::Value op_;
OverwriteMode mode_;
int constant_rhs_;
bool specialized_on_rhs_;
- char* name_;
static const int kMaxKnownRhs = 0x40000000;
@@ -508,7 +506,22 @@ class GenericBinaryOpStub : public CodeStub {
return key;
}
- const char* GetName();
+ const char* GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::MOD: return "GenericBinaryOpStub_MOD";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+ }
#ifdef DEBUG
void Print() {
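The static switch above replaces the heap-allocated, formatted stub name produced by the GetName deleted from codegen-arm.cc (and, with it, the Bootstrapper::AllocateAutoDeletedArray machinery removed further down). The trade-off is a coarser name for logging, roughly:

    // Old (formatted into an auto-deleted buffer), assuming Token::Name(ADD)
    // yields "ADD":  "GenericBinaryOpStub_ADD_OverwriteLeft_ConstantRhs"
    // New (static string per token): "GenericBinaryOpStub_ADD"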
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index afed0fa5c3..2f9e78f534 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -897,14 +897,15 @@ void Decoder::DecodeUnconditional(Instr* instr) {
// void Decoder::DecodeTypeVFP(Instr* instr)
-// vmov: Sn = Rt
-// vmov: Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
+// Implements the following VFP instructions:
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Decoder::DecodeTypeVFP(Instr* instr) {
@@ -996,8 +997,8 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
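The comment renames here, in the macro assembler, and in the simulator below all follow one mapping from the unified vmov/vcvt mnemonics back to the F-prefixed VFP names this revert restores:

    // vmov(Sn, Rt)        ->  fmsr(Sn, Rt)        core reg -> single
    // vmov(Rt, Sn)        ->  fmrs(Rt, Sn)        single -> core reg
    // vmov(Dm, Rt, Rt2)   ->  fmdrr(Dm, Rt, Rt2)  core pair -> double
    // vmov(Rt, Rt2, Dm)   ->  fmrrd(Rt, Rt2, Dm)  double -> core pair
    // vcvt(Dd, Sm)        ->  fsitod(Dd, Sm)      int -> double
    // vcvt(Sd, Dm)        ->  ftosid(Sd, Dm)      double -> int, round to zero
    // vadd/vsub/vmul/vdiv ->  faddd/fsubd/fmuld/fdivd
    // vcmp(Dd, Dm)        ->  fcmp(Dd, Dm)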
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 55d87b7c2d..ab636b6b88 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -414,98 +414,78 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(ip);
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- }
- break;
-
- case Slot::CONTEXT:
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ ASSERT(slot != NULL); // No global declarations here.
+
+ // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
+ switch (slot->type()) {
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
+ PropertyAttributes attr = decl->mode() == Variable::VAR ?
+ NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ mov(r0, Operand(Factory::the_hole_value()));
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ Visit(decl->fun()); // Initial value for function decl.
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ mov(r0, Operand(Factory::the_hole_value()));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ }
+ break;
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
+ if (decl->mode() == Variable::CONST) {
+ __ mov(r0, Operand(Factory::the_hole_value()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
- __ ldr(r1,
- CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ ldr(r1, CodeGenerator::ContextOperand(cp,
+ Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
}
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
- // No write barrier since the_hole_value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(r0);
- __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
- // We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, r0);
- }
- break;
-
- case Slot::LOOKUP: {
- __ mov(r2, Operand(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- } else if (decl->fun() != NULL) {
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
- Visit(decl->fun()); // Initial value for function decl.
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(r0);
- } else {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ ldr(r1, CodeGenerator::ContextOperand(cp,
+ Context::FCONTEXT_INDEX));
+ __ cmp(r1, cp);
+ __ Check(eq, "Unexpected declaration in current context.");
+ }
+ __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ RecordWrite(cp, r2, r0);
}
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Value in r0 is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ add(sp, sp, Operand(2 * kPointerSize));
- }
+ break;
+ default:
+ UNREACHABLE();
}
}
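In the Slot::LOOKUP case above, Runtime::kDeclareContextSlot always receives exactly four arguments, pushed in a single stm: the context, the variable name, the property attributes, and the initial value (the hole for const, the closure for function declarations, and Smi 0 as a no-initial-value marker otherwise). The const path, condensed from the hunk above:

    __ mov(r2, Operand(var->name()));                // name
    __ mov(r1, Operand(Smi::FromInt(READ_ONLY)));    // attributes for const
    __ mov(r0, Operand(Factory::the_hole_value()));  // initial value
    __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
    __ CallRuntime(Runtime::kDeclareContextSlot, 4);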
@@ -521,6 +501,21 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ Expression* expr = stmt->expression();
+ // Complete the statement based on the type of the subexpression.
+ if (expr->AsLiteral() != NULL) {
+ __ mov(r0, Operand(expr->AsLiteral()->handle()));
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(r0);
+ }
+ EmitReturnSequence(stmt->statement_pos());
+}
+
+
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -541,24 +536,18 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
+ Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
- ASSERT(var->is_global());
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
- __ mov(r2, Operand(var->name()));
+ __ mov(r2, Operand(expr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndMove(context, r0);
+ DropAndMove(expr->context(), r0);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@@ -579,7 +568,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
UNREACHABLE();
}
}
- Move(context, slot, r0);
+ Move(expr->context(), slot, r0);
} else {
// A variable has been rewritten into an explicit access to
// an object property.
@@ -614,7 +603,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(context, r0, 2);
+ DropAndMove(expr->context(), r0, 2);
}
}
@@ -648,15 +637,32 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // r2 = literal array (0).
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r2, literal_offset));
+ // Check whether we need to materialize the object literal boilerplate.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(ip));
+ __ b(ne, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // r1 = literal index (1).
__ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+ // r0 = constant properties (2).
__ mov(r0, Operand(expr->constant_properties()));
__ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // r0 contains boilerplate.
+ // Clone boilerplate.
+ __ push(r0);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
// If result_saved == true: The result is saved on top of the
@@ -757,15 +763,32 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ Label make_clone;
+
+ // Fetch the function's literals array.
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ // Check if the literal's boilerplate has been instantiated.
+ int offset =
+ FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+ __ ldr(r0, FieldMemOperand(r3, offset));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(&make_clone, ne);
+
+ // Instantiate the boilerplate.
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->literals()));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+ __ bind(&make_clone);
+ // Clone the boilerplate.
+ __ push(r0);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
bool result_saved = false; // Is the result saved to the stack?
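Both literal visitors now share a two-phase shape: check the function's literals array for a cached boilerplate, materialize it with a runtime call on first use, then clone it, deeply for nested literals (depth > 1) and shallowly otherwise. As a sketch of the control flow, not literal code:

    // if (literals[index] == undefined)
    //   literals[index] = CreateXxxLiteralBoilerplate(literals, index, constants);
    // result = (depth > 1) ? CloneLiteralBoilerplate(boilerplate)
    //                      : CloneShallowLiteralBoilerplate(boilerplate);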
@@ -837,38 +860,10 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, r0);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, r0);
-}
-
-
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- __ pop(r0);
- __ pop(r1);
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE);
- __ CallStub(&stub);
- Move(context, r0);
-}
-
-
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
+
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
@@ -981,6 +976,35 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
+ } else {
+ Property* property = var->rewrite()->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Load object and key onto the stack.
+ Slot* object_slot = property->obj()->AsSlot();
+ ASSERT_NOT_NULL(object_slot);
+ Move(Expression::kValue, object_slot, r0);
+
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ Move(Expression::kValue, key_literal);
+
+ // Value to store was pushed before object and key on the stack.
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+
+ // Arguments to the IC: value in r0, object and key on the stack.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ if (expr->context() == Expression::kEffect) {
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ } else if (expr->context() == Expression::kValue) {
+ // The value is still on the stack at sp[2 * kPointerSize].
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ } else {
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+ DropAndMove(expr->context(), r0, 3);
+ }
}
}
@@ -1080,9 +1104,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
DropAndMove(expr->context(), r0);
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
- RelocInfo::Mode mode) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1095,7 +1117,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ Call(ic, mode);
+ __ Call(ic, reloc_info);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -1135,7 +1157,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Push global object as receiver for the call IC lookup.
__ ldr(r0, CodeGenerator::GlobalObject());
__ stm(db_w, sp, r1.bit() | r0.bit());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1149,7 +1171,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
__ mov(r0, Operand(key->handle()));
__ push(r0);
Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1684,63 +1706,7 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), r0);
-}
-
-
-Register FastCodeGenerator::result_register() { return r0; }
-
-
-Register FastCodeGenerator::context_register() { return cp; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FastCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
- __ push(r1);
-}
-
-
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Restore result register from stack.
- __ pop(r1);
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
- __ add(pc, r1, Operand(masm_->CodeObject()));
-}
-
-
-void FastCodeGenerator::ThrowException() {
- __ push(result_register());
- __ CallRuntime(Runtime::kThrow, 1);
-}
-
-
#undef __
+
} } // namespace v8::internal
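Among the helpers deleted above, EnterFinallyBlock and ExitFinallyBlock "cook" the return address: instead of a raw code pointer, the stack holds a smi-tagged offset from the Code object, presumably so the value stays valid if the collector moves the code while a finally block is on the stack. The two halves, annotated:

    // EnterFinallyBlock: raw lr -> smi-encoded delta on the stack.
    __ sub(r1, lr, Operand(masm_->CodeObject()));  // delta from code start
    __ add(r1, r1, Operand(r1));                   // delta * 2 == Smi(delta)
    __ push(r1);

    // ExitFinallyBlock: reverse the encoding and resume at the return address.
    __ pop(r1);
    __ mov(r1, Operand(r1, ASR, 1));               // un-smi-tag
    __ add(pc, r1, Operand(masm_->CodeObject()));  // jump back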
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 0cb7f12302..b0fa13a5a1 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -28,11 +28,7 @@
#include "v8.h"
#include "frames-inl.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2-inl.h"
-#else
#include "arm/assembler-arm-inl.h"
-#endif
namespace v8 {
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index b57aa93967..c56f414a14 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -276,7 +276,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -371,11 +371,13 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ const ExternalReference& f) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -392,7 +394,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
__ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ mov(r1, Operand(f));
CEntryStub stub(1);
__ CallStub(&stub);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 876eec109c..aa6570ce11 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -162,21 +162,6 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
}
-void MacroAssembler::Drop(int stack_elements, Condition cond) {
- if (stack_elements > 0) {
- add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Call(Label* target) {
- bl(target);
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
-}
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
@@ -643,15 +628,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(r1);
- mov(ip, Operand(ExternalReference(Top::k_handler_address)));
- add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- str(r1, MemOperand(ip));
-}
-
-
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@@ -1018,9 +994,9 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
Register outLowReg) {
// ARMv7 VFP3 instructions to implement integer to double conversion.
mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt(d7, s15);
- vmov(outLowReg, outHighReg, d7);
+ fmsr(s15, r7);
+ fsitod(d7, s15);
+ fmrrd(outLowReg, outHighReg, d7);
}
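The restored sequence converts a smi to a double entirely in registers: untag, bit-move the integer into a single-precision VFP register, convert in place, then transfer the 64-bit result back to a core register pair:

    mov(r7, Operand(inReg, ASR, kSmiTagSize));  // untag the smi
    fmsr(s15, r7);                              // core reg -> S15 (raw 32 bits)
    fsitod(d7, s15);                            // signed int -> double in D7
    fmrrd(outLowReg, outHighReg, d7);           // D7 -> core register pair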
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 88bfa9ce0a..09743290f6 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -64,9 +64,6 @@ class MacroAssembler: public Assembler {
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
- void Drop(int stack_elements, Condition cond = al);
- void Call(Label* target);
- void Move(Register dst, Handle<Object> value);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
@@ -151,9 +148,6 @@ class MacroAssembler: public Assembler {
// On exit, r0 contains TOS (code slot).
void PushTryHandler(CodeLocation try_location, HandlerType type);
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index f3927720fb..9dc417bb71 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1893,14 +1893,14 @@ void Simulator::DecodeUnconditional(Instr* instr) {
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFP instructions are currently supported.
-// vmov :Sn = Rt
-// vmov :Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
+// fmsr :Sn = Rt
+// fmrs :Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Simulator::DecodeTypeVFP(Instr* instr) {
@@ -2020,8 +2020,8 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 958842d2c8..efccaf4960 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -446,7 +446,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
+void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -454,8 +454,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss,
- Failure** failure) {
+ Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@@ -477,8 +476,6 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
-
- return true;
}
@@ -777,26 +774,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r2, r3);
- __ b(ne, &miss);
- } else {
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, &miss);
- }
+ __ cmp(r1, Operand(Handle<JSFunction>(function)));
+ __ b(ne, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1024,10 +1003,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
- AccessorInfo* callback) {
+ AccessorInfo* callback,
+ String* name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1036,11 +1015,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
__ ldr(r0, MemOperand(sp, 0));
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
- callback, name, &miss, &failure);
- if (!success) return failure;
-
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1193,11 +1168,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
- callback, name, &miss, &failure);
- if (!success) return failure;
-
+ GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 132c8aebc1..47ecb96360 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -243,8 +243,11 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
+ Result* arg_count_register,
int arg_count) {
+ ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
+ arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index d5230007a4..457478da92 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -305,6 +305,7 @@ class VirtualFrame : public ZoneObject {
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
+ Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 2d16250ccd..9c9ddcdda6 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -573,16 +573,6 @@ ExternalReference ExternalReference::random_positive_smi_function() {
}
-ExternalReference ExternalReference::keyed_lookup_cache_keys() {
- return ExternalReference(KeyedLookupCache::keys_address());
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
- return ExternalReference(KeyedLookupCache::field_offsets_address());
-}
-
-
ExternalReference ExternalReference::the_hole_value_location() {
return ExternalReference(Factory::the_hole_value().location());
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 87cde9bdf5..aecd4cd63a 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -401,10 +401,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference builtin_passed_function();
static ExternalReference random_positive_smi_function();
- // Static data in the keyed lookup cache.
- static ExternalReference keyed_lookup_cache_keys();
- static ExternalReference keyed_lookup_cache_field_offsets();
-
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 195fc14ed4..c27d558a26 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -139,7 +139,6 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
- virtual CompareOperation* AsCompareOperation() { return NULL; }
};
@@ -193,13 +192,13 @@ class Expression: public AstNode {
virtual void MarkAsStatement() { /* do nothing */ }
// Static type information for this expression.
- StaticType* type() { return &type_; }
+ SmiAnalysis* type() { return &type_; }
Context context() { return context_; }
void set_context(Context context) { context_ = context; }
private:
- StaticType type_;
+ SmiAnalysis type_;
Context context_;
};
@@ -1186,7 +1185,7 @@ class CountOperation: public Expression {
class CompareOperation: public Expression {
public:
CompareOperation(Token::Value op, Expression* left, Expression* right)
- : op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
+ : op_(op), left_(left), right_(right) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1196,18 +1195,10 @@ class CompareOperation: public Expression {
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- // Accessors for the flag indicating whether this compare hangs off a for loop.
- bool is_for_loop_condition() const { return is_for_loop_condition_; }
- void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
-
- // Type testing & conversion
- virtual CompareOperation* AsCompareOperation() { return this; }
-
private:
Token::Value op_;
Expression* left_;
Expression* right_;
- bool is_for_loop_condition_;
};
@@ -1250,8 +1241,6 @@ class Assignment: public Expression {
Expression* target() const { return target_; }
Expression* value() const { return value_; }
int position() { return pos_; }
- // This check relies on the definition order of token in token.h.
- bool is_compound() const { return op() > Token::ASSIGN; }
// An initialization block is a series of statments of the form
// x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 6ae31251a9..deda96f316 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -95,8 +95,6 @@ static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
// This is for delete, not delete[].
static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
-// This is for delete[]
-static List<char*>* delete_these_arrays_on_tear_down = NULL;
NativesExternalStringResource::NativesExternalStringResource(const char* source)
@@ -152,41 +150,17 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
}
-char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
- char* memory = new char[bytes];
- if (memory != NULL) {
- if (delete_these_arrays_on_tear_down == NULL) {
- delete_these_arrays_on_tear_down = new List<char*>(2);
- }
- delete_these_arrays_on_tear_down->Add(memory);
- }
- return memory;
-}
-
-
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down != NULL) {
int len = delete_these_non_arrays_on_tear_down->length();
ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down->at(i);
- delete_these_non_arrays_on_tear_down->at(i) = NULL;
}
delete delete_these_non_arrays_on_tear_down;
delete_these_non_arrays_on_tear_down = NULL;
}
- if (delete_these_arrays_on_tear_down != NULL) {
- int len = delete_these_arrays_on_tear_down->length();
- ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete[] delete_these_arrays_on_tear_down->at(i);
- delete_these_arrays_on_tear_down->at(i) = NULL;
- }
- delete delete_these_arrays_on_tear_down;
- delete_these_arrays_on_tear_down = NULL;
- }
-
natives_cache.Initialize(false); // Yes, symmetrical
extensions_cache.Initialize(false);
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 7cd3a2bbf8..07d2747b44 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -74,10 +74,6 @@ class Bootstrapper : public AllStatic {
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
static void FreeThreadResources();
-
- // This will allocate a char array that is deleted when V8 is shut down.
- // It should only be used for strictly finite allocations.
- static char* AllocateAutoDeletedArray(int bytes);
};
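
The reverted AllocateAutoDeletedArray is an allocate-and-register pattern: every array handed out is remembered so TearDown() can delete[] it later. A sketch of the same idea on the standard library (names here are illustrative):

    #include <vector>

    namespace {
    // Created lazily, mirroring delete_these_arrays_on_tear_down above.
    std::vector<char*>* arrays_to_delete = nullptr;
    }  // namespace

    char* AllocateAutoDeletedArray(int bytes) {
      char* memory = new char[bytes];
      if (arrays_to_delete == nullptr) arrays_to_delete = new std::vector<char*>();
      arrays_to_delete->push_back(memory);  // remember it for teardown
      return memory;
    }

    void TearDown() {
      if (arrays_to_delete == nullptr) return;
      for (char* array : *arrays_to_delete) delete[] array;
      delete arrays_to_delete;
      arrays_to_delete = nullptr;
    }

As the original ASSERT notes, the mechanism is only meant for a small, bounded number of allocations.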
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 09581aa82a..dbc39ff3bf 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -35,117 +35,82 @@
namespace v8 {
namespace internal {
-bool CodeStub::FindCodeInCache(Code** code_out) {
- if (has_custom_cache()) return GetCustomCache(code_out);
- int index = Heap::code_stubs()->FindEntry(GetKey());
- if (index != NumberDictionary::kNotFound) {
- *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
- return true;
- }
- return false;
-}
-
-
-void CodeStub::GenerateCode(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- Counters::code_stubs.Increment();
- // Nested stubs are not allowed for leaves.
- masm->set_allow_stub_calls(AllowsStubCalls());
- // Generate the code for the stub.
- masm->set_generating_stub(true);
- Generate(masm);
-}
-
-
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
- code->set_major_key(MajorKey());
-
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(code, masm);
-
- LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
- Counters::total_stubs_code_size.Increment(code->instruction_size());
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
-#ifdef DEBUG
- Print();
-#endif
- code->Disassemble(GetName());
- PrintF("\n");
+Handle<Code> CodeStub::GetCode() {
+ bool custom_cache = has_custom_cache();
+
+ int index = 0;
+ uint32_t key = 0;
+ if (custom_cache) {
+ Code* cached;
+ if (GetCustomCache(&cached)) {
+ return Handle<Code>(cached);
+ } else {
+ index = NumberDictionary::kNotFound;
+ }
+ } else {
+ key = GetKey();
+ index = Heap::code_stubs()->FindEntry(key);
+ if (index != NumberDictionary::kNotFound)
+ return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
}
-#endif
-}
-
-Handle<Code> CodeStub::GetCode() {
- Code* code;
- if (!FindCodeInCache(&code)) {
+ Code* result;
+ {
v8::HandleScope scope;
+ // Update the static counter each time a new code stub is generated.
+ Counters::code_stubs.Increment();
+
// Generate the new code.
MacroAssembler masm(NULL, 256);
- GenerateCode(&masm);
+
+ // Nested stubs are not allowed for leaves.
+ masm.set_allow_stub_calls(AllowsStubCalls());
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ Generate(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
- // Copy the generated code into a heap object.
+ // Copy the generated code into a heap object, and store the major key.
Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
- Handle<Code> new_object =
- Factory::NewCode(desc, NULL, flags, masm.CodeObject());
- RecordCodeGeneration(*new_object, &masm);
+ Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+ code->set_major_key(MajorKey());
- if (has_custom_cache()) {
- SetCustomCache(*new_object);
+ // Add unresolved entries in the code to the fixup list.
+ Bootstrapper::AddFixup(*code, &masm);
+
+ LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
+ Counters::total_stubs_code_size.Increment(code->instruction_size());
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code_stubs) {
+#ifdef DEBUG
+ Print();
+#endif
+ code->Disassemble(GetName());
+ PrintF("\n");
+ }
+#endif
+
+ if (custom_cache) {
+ SetCustomCache(*code);
} else {
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
- GetKey(),
- new_object);
+ key,
+ code);
Heap::public_set_code_stubs(*dict);
}
- code = *new_object;
- }
-
- return Handle<Code>(code);
-}
-
-
-Object* CodeStub::TryGetCode() {
- Code* code;
- if (!FindCodeInCache(&code)) {
- // Generate the new code.
- MacroAssembler masm(NULL, 256);
- GenerateCode(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Try to copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
- Object* new_object =
- Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
- if (new_object->IsFailure()) return new_object;
- code = Code::cast(new_object);
- RecordCodeGeneration(code, &masm);
-
- if (has_custom_cache()) {
- SetCustomCache(code);
- } else {
- // Try to update the code cache but do not fail if unable.
- new_object = Heap::code_stubs()->AtNumberPut(GetKey(), code);
- if (!new_object->IsFailure()) {
- Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
- }
- }
+ result = *code;
}
- return code;
+ return Handle<Code>(result);
}
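
Stripped of handles and GC concerns, the control flow of GetCode() above is a classic memoized factory: look the key up, generate on a miss, store, return. A sketch with a plain hash map standing in for Heap::code_stubs() (a simplification, not V8's API):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    using Code = std::string;  // stand-in for a generated code object

    static std::unordered_map<uint32_t, Code> code_stub_cache;

    Code Generate(uint32_t key) {         // stand-in for CodeStub::Generate
      return "stub-" + std::to_string(key);
    }

    Code GetCode(uint32_t key) {
      auto it = code_stub_cache.find(key);
      if (it != code_stub_cache.end()) return it->second;  // cache hit
      Code code = Generate(key);           // miss: generate the new code
      code_stub_cache.emplace(key, code);  // update the cache for next time
      return code;
    }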
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index fee92b9513..25a2d0f551 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -43,9 +43,6 @@ namespace internal {
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
- V(FastNewClosure) \
- V(FastNewContext) \
- V(FastCloneShallowArray) \
V(UnarySub) \
V(RevertToNumber) \
V(ToBoolean) \
@@ -86,11 +83,6 @@ class CodeStub BASE_EMBEDDED {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode();
- // Retrieve the code for the stub if already generated. Do not
- // generate the code if not already generated and instead return a
- // retry after GC Failure object.
- Object* TryGetCode();
-
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
};
@@ -112,20 +104,9 @@ class CodeStub BASE_EMBEDDED {
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
- // Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
-
- // Nonvirtual wrapper around the stub-specific Generate function. Call
- // this function to set up the macro assembler and generate the code.
- void GenerateCode(MacroAssembler* masm);
-
// Generates the assembler code for the stub.
virtual void Generate(MacroAssembler* masm) = 0;
- // Perform bookkeeping required after code generation when stub code is
- // initially generated.
- void RecordCodeGeneration(Code* code, MacroAssembler* masm);
-
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
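
MajorKeyFromKey() above decodes a bit-packed key: kMajorBits of the 32-bit key identify the stub class, and the remaining kMinorBits carry per-stub parameters. A sketch of that packing (the field widths and layout here are assumptions, not V8's exact encoding):

    #include <cassert>
    #include <cstdint>

    constexpr int kMajorBits = 5;                        // assumed width
    constexpr uint32_t kMajorMask = (1u << kMajorBits) - 1;

    uint32_t MakeKey(uint32_t major, uint32_t minor) {
      assert(major <= kMajorMask);
      return (minor << kMajorBits) | major;              // minor in high bits
    }
    uint32_t MajorKeyFromKey(uint32_t key) { return key & kMajorMask; }
    uint32_t MinorKeyFromKey(uint32_t key) { return key >> kMajorBits; }

    int main() {
      uint32_t key = MakeKey(/*major=*/3, /*minor=*/42);
      assert(MajorKeyFromKey(key) == 3 && MinorKeyFromKey(key) == 42);
    }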
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index b3cf51ed5d..85a08d59e5 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -233,55 +233,6 @@ class StackCheckStub : public CodeStub {
};
-class FastNewClosureStub : public CodeStub {
- public:
- void Generate(MacroAssembler* masm);
-
- private:
- const char* GetName() { return "FastNewClosureStub"; }
- Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return 0; }
-};
-
-
-class FastNewContextStub : public CodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- const char* GetName() { return "FastNewContextStub"; }
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastCloneShallowArrayStub : public CodeStub {
- public:
- static const int kMaximumLength = 8;
-
- explicit FastCloneShallowArrayStub(int length) : length_(length) {
- ASSERT(length >= 0 && length <= kMaximumLength);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int length_;
-
- const char* GetName() { return "FastCloneShallowArrayStub"; }
- Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() { return length_; }
-};
-
-
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 03771d9c4a..22b0a03c6a 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -56,8 +56,6 @@ class CodeGenSelector: public AstVisitor {
private:
// Visit an expression in a given expression context.
void ProcessExpression(Expression* expr, Expression::Context context) {
- ASSERT(expr->context() == Expression::kUninitialized ||
- expr->context() == context);
Expression::Context saved = context_;
context_ = context;
Visit(expr);
@@ -598,7 +596,7 @@ CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
Slot* slot = scope->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
if (FLAG_trace_bailout) {
- PrintF("Function has context-allocated parameters.\n");
+ PrintF("function has context-allocated parameters");
}
return NORMAL;
}
@@ -647,18 +645,6 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
- Property* prop = decl->proxy()->AsProperty();
- if (prop != NULL) {
- // Property rewrites are shared, ensure we are not changing its
- // expression context state.
- ASSERT(prop->obj()->context() == Expression::kUninitialized ||
- prop->obj()->context() == Expression::kValue);
- ASSERT(prop->key()->context() == Expression::kUninitialized ||
- prop->key()->context() == Expression::kValue);
- ProcessExpression(prop->obj(), Expression::kValue);
- ProcessExpression(prop->key(), Expression::kValue);
- }
-
if (decl->fun() != NULL) {
ProcessExpression(decl->fun(), Expression::kValue);
}
@@ -690,10 +676,12 @@ void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
+ BAILOUT("ContinueStatement");
}
void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
+ BAILOUT("BreakStatement");
}
@@ -703,12 +691,12 @@ void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
- ProcessExpression(stmt->expression(), Expression::kValue);
+ BAILOUT("WithEnterStatement");
}
void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
- // Supported.
+ BAILOUT("WithExitStatement");
}
@@ -736,7 +724,21 @@ void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
+ // We do not handle loops with break or continue statements in their
+ // body; we bail out when we hit such a statement while visiting the body.
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ CHECK_BAILOUT;
+ }
+ if (stmt->cond() != NULL) {
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ }
+ Visit(stmt->body());
+ if (stmt->next() != NULL) {
+ CHECK_BAILOUT;
+ Visit(stmt->next());
+ }
}
@@ -751,9 +753,7 @@ void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->finally_block());
+ BAILOUT("TryFinallyStatement");
}
@@ -885,22 +885,34 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
// non-context (stack-allocated) locals, and global variables.
Token::Value op = expr->op();
if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+ if (op != Token::ASSIGN && op != Token::INIT_VAR) {
+ BAILOUT("compound assignment");
+ }
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
// All global variables are supported.
if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("Lookup slot");
+ if (var->slot() == NULL) {
+ Property* property = var->AsProperty();
+ if (property == NULL) {
+ BAILOUT("non-global/non-slot/non-property assignment");
+ }
+ if (property->obj()->AsSlot() == NULL) {
+ BAILOUT("variable rewritten to property non slot object assignment");
+ }
+ if (property->key()->AsLiteral() == NULL) {
+ BAILOUT("variable rewritten to property non literal key assignment");
+ }
+ } else {
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
}
}
} else if (prop != NULL) {
- ASSERT(prop->obj()->context() == Expression::kUninitialized ||
- prop->obj()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
// We will only visit the key during code generation for keyed property
@@ -911,8 +923,6 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
if (lit == NULL ||
!lit->handle()->IsSymbol() ||
String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
- ASSERT(prop->key()->context() == Expression::kUninitialized ||
- prop->key()->context() == Expression::kValue);
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
@@ -1101,14 +1111,14 @@ void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
- ProcessExpression(expr->left(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), Expression::kValue);
+ ProcessExpression(expr->left(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), Expression::kValue);
}
void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
- // ThisFunction is supported.
+ BAILOUT("ThisFunction");
}
#undef BAILOUT
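
Several of the visitors above lean on BAILOUT and CHECK_BAILOUT, whose definitions fall outside this hunk. A plausible minimal shape of the protocol, for orientation (the real macros may differ in detail):

    #include <cstdio>

    static bool has_supported_syntax = true;

    // Record the reason and unwind the current Visit call.
    #define BAILOUT(reason)                          \
      do {                                           \
        fprintf(stderr, "bailout: %s\n", (reason));  \
        has_supported_syntax = false;                \
        return;                                      \
      } while (false)

    // Stop early if a nested Visit already bailed out.
    #define CHECK_BAILOUT                            \
      do {                                           \
        if (!has_supported_syntax) return;           \
      } while (false)

    void VisitCond() { /* may BAILOUT("...") somewhere inside */ }
    void VisitBody() {}

    void VisitLoop() {
      VisitCond();
      CHECK_BAILOUT;  // the condition may have contained unsupported syntax
      VisitBody();
    }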
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 8a50864266..2f646a5638 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -30,7 +30,6 @@
#include "v8.h"
#include "api.h"
-#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "simulator.h"
@@ -608,11 +607,6 @@ Object* Execution::DebugBreakHelper() {
return Heap::undefined_value();
}
- // Ignore debug break during bootstrapping.
- if (Bootstrapper::IsActive()) {
- return Heap::undefined_value();
- }
-
{
JavaScriptFrameIterator it;
ASSERT(!it.done());
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 2a80953eb1..83775ef65d 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -284,8 +284,7 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Map> function_map,
- PretenureFlag pretenure) {
+ Handle<Map> function_map) {
ASSERT(boilerplate->IsBoilerplate());
ASSERT(!boilerplate->has_initial_map());
ASSERT(!boilerplate->has_prototype());
@@ -293,22 +292,20 @@ Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
boilerplate->shared(),
- Heap::the_hole_value(),
- pretenure),
+ Heap::the_hole_value()),
JSFunction);
}
Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Context> context,
- PretenureFlag pretenure) {
- Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
- boilerplate, Top::function_map(), pretenure);
+ Handle<Context> context) {
+ Handle<JSFunction> result =
+ BaseNewFunctionFromBoilerplate(boilerplate, Top::function_map());
result->set_context(*context);
int number_of_literals = boilerplate->NumberOfLiterals();
Handle<FixedArray> literals =
- Factory::NewFixedArray(number_of_literals, pretenure);
+ Factory::NewFixedArray(number_of_literals, TENURED);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index fd277f20d3..951c0439b0 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -219,8 +219,7 @@ class Factory : public AllStatic {
static Handle<JSFunction> NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Context> context,
- PretenureFlag pretenure = TENURED);
+ Handle<Context> context);
static Handle<Code> NewCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
@@ -375,8 +374,7 @@ class Factory : public AllStatic {
static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Map> function_map,
- PretenureFlag pretenure);
+ Handle<Map> function_map);
// Create a new map cache.
static Handle<MapCache> NewMapCache(int at_least_space_for);
diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc
index b15a673552..20de808530 100644
--- a/deps/v8/src/fast-codegen.cc
+++ b/deps/v8/src/fast-codegen.cc
@@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm())
+#define __ ACCESS_MASM(masm_)
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
@@ -232,10 +232,8 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
void FastCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
- Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
VisitStatements(stmt->statements());
- __ bind(nested_statement.break_target());
}
@@ -280,88 +278,22 @@ void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- Comment cmnt(masm_, "[ ContinueStatement");
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (!current->IsContinueTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- Iteration* loop = current->AsIteration();
- __ jmp(loop->continue_target());
+ UNREACHABLE();
}
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- Comment cmnt(masm_, "[ BreakStatement");
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (!current->IsBreakTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- Breakable* target = current->AsBreakable();
- __ jmp(target->break_target());
-}
-
-
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- Expression* expr = stmt->expression();
- // Complete the statement based on the type of the subexpression.
- if (expr->AsLiteral() != NULL) {
- __ Move(result_register(), expr->AsLiteral()->handle());
- } else {
- ASSERT_EQ(Expression::kValue, expr->context());
- Visit(expr);
- __ pop(result_register());
- }
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (current != NULL) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- EmitReturnSequence(stmt->statement_pos());
+ UNREACHABLE();
}
-
-
void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Comment cmnt(masm_, "[ WithEnterStatement");
- SetStatementPosition(stmt);
-
- Visit(stmt->expression());
- if (stmt->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- __ CallRuntime(Runtime::kPushContext, 1);
- }
- // Both runtime calls return the new context in both the context and the
- // result registers.
-
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+ UNREACHABLE();
}
void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- Comment cmnt(masm_, "[ WithExitStatement");
- SetStatementPosition(stmt);
-
- // Pop context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+ UNREACHABLE();
}
@@ -372,10 +304,8 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
- Label body, stack_limit_hit, stack_check_success;
-
- Iteration loop_statement(this, stmt);
increment_loop_depth();
+ Label body, exit, stack_limit_hit, stack_check_success;
__ bind(&body);
Visit(stmt->body());
@@ -386,11 +316,10 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
- __ bind(loop_statement.continue_target());
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
- false_label_ = loop_statement.break_target();
+ false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
@@ -401,7 +330,7 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
+ __ bind(&exit);
decrement_loop_depth();
}
@@ -409,18 +338,16 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
- Label body, stack_limit_hit, stack_check_success;
-
- Iteration loop_statement(this, stmt);
increment_loop_depth();
+ Label test, body, exit, stack_limit_hit, stack_check_success;
// Emit the test at the bottom of the loop.
- __ jmp(loop_statement.continue_target());
+ __ jmp(&test);
__ bind(&body);
Visit(stmt->body());
- __ bind(loop_statement.continue_target());
+ __ bind(&test);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
@@ -430,7 +357,7 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
- false_label_ = loop_statement.break_target();
+ false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
@@ -441,13 +368,55 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
+ __ bind(&exit);
+
decrement_loop_depth();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ ForStatement");
+ Label test, body, exit, stack_limit_hit, stack_check_success;
+ if (stmt->init() != NULL) Visit(stmt->init());
+
+ increment_loop_depth();
+ // Emit the test at the bottom of the loop (even if empty).
+ __ jmp(&test);
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ if (stmt->next() != NULL) Visit(stmt->next());
+
+ __ bind(&test);
+
+ if (stmt->cond() == NULL) {
+ // For an empty test jump to the top of the loop.
+ __ jmp(&body);
+ } else {
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+ }
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+ decrement_loop_depth();
}
@@ -462,63 +431,7 @@ void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- // Try finally is compiled by setting up a try-handler on the stack while
- // executing the try body, and removing it again afterwards.
- //
- // The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the transfer, e.g. the break,
- // removes the try handler and calls the finally block code before
- // continuing its outward control transfer.
- // 3. By exiting the try-block with a thrown exception.
- // This can happen in nested function calls. It traverses the try-handler
- // chain and consumes the try-handler entry before jumping to the
- // handler code. The handler code then calls the finally-block before
- // rethrowing the exception.
- //
- // The finally block must assume a return address on top of the stack
- // (or in the link register on ARM chips) and a value (return value or
- // exception) in the result register (rax/eax/r0), both of which must
- // be preserved. The return address isn't GC-safe, so it should be
- // cooked before GC.
- Label finally_entry;
- Label try_handler_setup;
-
- // Set up the try-handler chain: jump to the try-handler setup and the
- // try-block code, using a call so that the try-handler address is pushed
- // on the stack.
- __ Call(&try_handler_setup);
- // Try handler code. Return address of call is pushed on handler stack.
- {
- // This code is only executed during stack-handler traversal when an
- // exception is thrown. The exception is in the result register, which
- // is retained by the finally block.
- // Call the finally block and then rethrow the exception.
- __ Call(&finally_entry);
- ThrowException();
- }
-
- __ bind(&finally_entry);
- {
- // Finally block implementation.
- EnterFinallyBlock();
- Finally finally_block(this);
- Visit(stmt->finally_block());
- ExitFinallyBlock(); // Return to the calling code.
- }
-
- __ bind(&try_handler_setup);
- {
- // Setup try handler (stack pointer registers).
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- TryFinally try_block(this, &finally_entry);
- VisitStatements(stmt->try_block()->statements());
- __ PopTryHandler();
- }
- // Execute the finally block on the way out.
- __ Call(&finally_entry);
+ UNREACHABLE();
}
@@ -587,79 +500,40 @@ void FastCodeGenerator::VisitLiteral(Literal* expr) {
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
// Record source code position of the (possible) IC call.
SetSourcePosition(expr->position());
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables rewritten to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
+ Expression* rhs = expr->value();
+ // Left-hand side can only be a property, a global or a (parameter or
+ // local) slot.
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- break;
- case KEYED_PROPERTY:
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ if (var != NULL) {
+ Visit(rhs);
+ ASSERT_EQ(Expression::kValue, rhs->context());
+ EmitVariableAssignment(expr);
+ } else if (prop != NULL) {
+ // Assignment to a property.
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ // Use the expression context of the key subexpression to detect whether
+ // we have decided to use a named or keyed IC.
+ if (prop->key()->context() == Expression::kUninitialized) {
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ Visit(rhs);
+ ASSERT_EQ(Expression::kValue, rhs->context());
+ EmitNamedPropertyAssignment(expr);
+ } else {
Visit(prop->key());
ASSERT_EQ(Expression::kValue, prop->key()->context());
- break;
- }
-
- // If we have a compound assignment: Get value of LHS expression and
- // store it on top of the stack.
- // Note: Relies on kValue context being 'stack'.
- if (expr->is_compound()) {
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop, Expression::kValue);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(Expression::kValue);
- break;
- }
- }
-
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- ASSERT_EQ(Expression::kValue, rhs->context());
- Visit(rhs);
-
- // If we have a compound assignment: apply the operator.
- if (expr->is_compound()) {
- EmitCompoundAssignmentOp(expr->binary_op(), Expression::kValue);
- }
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
+ Visit(rhs);
+ ASSERT_EQ(Expression::kValue, rhs->context());
EmitKeyedPropertyAssignment(expr);
- break;
+ }
+ } else {
+ UNREACHABLE();
}
}
@@ -674,20 +548,8 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
}
-int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- __ Call(finally_entry_);
- return 0;
-}
-
-
-int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- return 0;
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
}
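
The deleted break/continue/return visitors all share one unwinding idiom: walk the nesting stack outward from the innermost statement, let every level report the stack slots it leaves behind via Exit(), and emit a single Drop for the sum. A sketch of that accounting (simplified types, not V8's):

    // Each nested construct knows how many stack slots it keeps live; a
    // control transfer sums them up to the target and drops them at once.
    struct Nested {
      Nested* outer;
      int extra_slots;   // e.g. a finally block's saved parameters
      bool is_target;    // true for the statement being jumped to
    };

    int SlotsToDrop(const Nested* innermost) {
      int depth = 0;
      for (const Nested* n = innermost; n != nullptr && !n->is_target;
           n = n->outer) {
        depth += n->extra_slots;  // plays the role of Exit(depth)
      }
      return depth;               // emit one __ Drop(depth) with this value
    }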
diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h
index 54f0df1152..9b262a7393 100644
--- a/deps/v8/src/fast-codegen.h
+++ b/deps/v8/src/fast-codegen.h
@@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
-// -----------------------------------------------------------------------------
-// Fast code generator.
class FastCodeGenerator: public AstVisitor {
public:
@@ -45,7 +43,6 @@ class FastCodeGenerator: public AstVisitor {
function_(NULL),
script_(script),
is_eval_(is_eval),
- nesting_stack_(NULL),
loop_depth_(0),
true_label_(NULL),
false_label_(NULL) {
@@ -58,159 +55,6 @@ class FastCodeGenerator: public AstVisitor {
void Generate(FunctionLiteral* fun);
private:
- class Breakable;
- class Iteration;
- class TryCatch;
- class TryFinally;
- class Finally;
- class ForIn;
-
- class NestedStatement BASE_EMBEDDED {
- public:
- explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
- // Link into codegen's nesting stack.
- previous_ = codegen->nesting_stack_;
- codegen->nesting_stack_ = this;
- }
- virtual ~NestedStatement() {
- // Unlink from codegen's nesting stack.
- ASSERT_EQ(this, codegen_->nesting_stack_);
- codegen_->nesting_stack_ = previous_;
- }
-
- virtual Breakable* AsBreakable() { return NULL; }
- virtual Iteration* AsIteration() { return NULL; }
- virtual TryCatch* AsTryCatch() { return NULL; }
- virtual TryFinally* AsTryFinally() { return NULL; }
- virtual Finally* AsFinally() { return NULL; }
- virtual ForIn* AsForIn() { return NULL; }
-
- virtual bool IsContinueTarget(Statement* target) { return false; }
- virtual bool IsBreakTarget(Statement* target) { return false; }
-
- // Generate code to leave the nested statement. This includes
- // cleaning up any stack elements in use and restoring the
- // stack to the expectations of the surrounding statements.
- // Takes a number of stack elements currently on top of the
- // nested statement's stack, and returns a number of stack
- // elements left on top of the surrounding statement's stack.
- // The generated code must preserve the result register (which
- // contains the value in case of a return).
- virtual int Exit(int stack_depth) {
- // Default implementation for the case where there is
- // nothing to clean up.
- return stack_depth;
- }
- NestedStatement* outer() { return previous_; }
- protected:
- MacroAssembler* masm() { return codegen_->masm(); }
- private:
- FastCodeGenerator* codegen_;
- NestedStatement* previous_;
- DISALLOW_COPY_AND_ASSIGN(NestedStatement);
- };
-
- class Breakable : public NestedStatement {
- public:
- Breakable(FastCodeGenerator* codegen,
- BreakableStatement* break_target)
- : NestedStatement(codegen),
- target_(break_target) {}
- virtual ~Breakable() {}
- virtual Breakable* AsBreakable() { return this; }
- virtual bool IsBreakTarget(Statement* statement) {
- return target_ == statement;
- }
- BreakableStatement* statement() { return target_; }
- Label* break_target() { return &break_target_label_; }
- private:
- BreakableStatement* target_;
- Label break_target_label_;
- DISALLOW_COPY_AND_ASSIGN(Breakable);
- };
-
- class Iteration : public Breakable {
- public:
- Iteration(FastCodeGenerator* codegen,
- IterationStatement* iteration_statement)
- : Breakable(codegen, iteration_statement) {}
- virtual ~Iteration() {}
- virtual Iteration* AsIteration() { return this; }
- virtual bool IsContinueTarget(Statement* statement) {
- return this->statement() == statement;
- }
- Label* continue_target() { return &continue_target_label_; }
- private:
- Label continue_target_label_;
- DISALLOW_COPY_AND_ASSIGN(Iteration);
- };
-
- // The environment inside the try block of a try/catch statement.
- class TryCatch : public NestedStatement {
- public:
- explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
- : NestedStatement(codegen), catch_entry_(catch_entry) { }
- virtual ~TryCatch() {}
- virtual TryCatch* AsTryCatch() { return this; }
- Label* catch_entry() { return catch_entry_; }
- virtual int Exit(int stack_depth);
- private:
- Label* catch_entry_;
- DISALLOW_COPY_AND_ASSIGN(TryCatch);
- };
-
- // The environment inside the try block of a try/finally statement.
- class TryFinally : public NestedStatement {
- public:
- explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
- : NestedStatement(codegen), finally_entry_(finally_entry) { }
- virtual ~TryFinally() {}
- virtual TryFinally* AsTryFinally() { return this; }
- Label* finally_entry() { return finally_entry_; }
- virtual int Exit(int stack_depth);
- private:
- Label* finally_entry_;
- DISALLOW_COPY_AND_ASSIGN(TryFinally);
- };
-
- // A FinallyEnvironment represents being inside a finally block.
- // Abnormal termination of the finally block needs to clean up
- // the block's parameters from the stack.
- class Finally : public NestedStatement {
- public:
- explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
- virtual ~Finally() {}
- virtual Finally* AsFinally() { return this; }
- virtual int Exit(int stack_depth) {
- return stack_depth + kFinallyStackElementCount;
- }
- private:
- // Number of extra stack slots occupied during a finally block.
- static const int kFinallyStackElementCount = 2;
- DISALLOW_COPY_AND_ASSIGN(Finally);
- };
-
- // A ForInEnvironment represents being inside a for-in loop.
- // Abnormal termination of the for-in block needs to clean up
- // the block's temporary storage from the stack.
- class ForIn : public Iteration {
- public:
- ForIn(FastCodeGenerator* codegen,
- ForInStatement* statement)
- : Iteration(codegen, statement) { }
- virtual ~ForIn() {}
- virtual ForIn* AsForIn() { return this; }
- virtual int Exit(int stack_depth) {
- return stack_depth + kForInStackElementCount;
- }
- private:
- // TODO(lrn): Check that this value is correct when implementing
- // for-in.
- static const int kForInStackElementCount = 5;
- DISALLOW_COPY_AND_ASSIGN(ForIn);
- };
-
-
int SlotOffset(Slot* slot);
void Move(Expression::Context destination, Register source);
void Move(Expression::Context destination, Slot* source, Register scratch);
@@ -240,25 +84,10 @@ class FastCodeGenerator: public AstVisitor {
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
- void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
-
- // Platform-specific code for loading variables.
- void EmitVariableLoad(Variable* expr, Expression::Context context);
+ void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
// Platform-specific support for compiling assignments.
- // Load a value from a named property and push the result on the stack.
- // The receiver is left on the stack by the IC.
- void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
-
- // Load a value from a named property and push the result on the stack.
- // The receiver and the key is left on the stack by the IC.
- void EmitKeyedPropertyLoad(Expression::Context context);
-
- // Apply the compound assignment operator. Expects both operands on top
- // of the stack.
- void EmitCompoundAssignmentOp(Token::Value op, Expression::Context context);
-
// Complete a variable assignment. The right-hand-side value is expected
// on top of the stack.
void EmitVariableAssignment(Assignment* expr);
@@ -276,12 +105,6 @@ class FastCodeGenerator: public AstVisitor {
void SetStatementPosition(Statement* stmt);
void SetSourcePosition(int pos);
- // Non-local control flow support.
- void EnterFinallyBlock();
- void ExitFinallyBlock();
- void ThrowException();
-
- // Loop nesting counter.
int loop_depth() { return loop_depth_; }
void increment_loop_depth() { loop_depth_++; }
void decrement_loop_depth() {
@@ -289,22 +112,11 @@ class FastCodeGenerator: public AstVisitor {
loop_depth_--;
}
- MacroAssembler* masm() { return masm_; }
- static Register result_register();
- static Register context_register();
-
- // Set fields in the stack frame. Offsets are the frame pointer relative
- // offsets defined in, e.g., StandardFrameConstants.
- void StoreToFrameField(int frame_offset, Register value);
-
- // Load a value from the current context. Indices are defined as an enum
- // in v8::internal::Context.
- void LoadContextField(Register dst, int context_index);
-
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
@@ -313,14 +125,11 @@ class FastCodeGenerator: public AstVisitor {
Handle<Script> script_;
bool is_eval_;
Label return_label_;
- NestedStatement* nesting_stack_;
int loop_depth_;
Label* true_label_;
Label* false_label_;
- friend class NestedStatement;
-
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
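
The NestedStatement class removed above keeps the compile-time nesting stack in sync with C++ scopes: the constructor pushes, the destructor pops, and the ASSERT enforces strict LIFO order. A self-contained sketch of the RAII pattern:

    #include <cassert>

    struct CodeGen;

    struct NestedStatement {
      explicit NestedStatement(CodeGen* cg);
      ~NestedStatement();
      CodeGen* codegen;
      NestedStatement* previous;
    };

    struct CodeGen {
      NestedStatement* nesting_stack = nullptr;
    };

    NestedStatement::NestedStatement(CodeGen* cg)
        : codegen(cg), previous(cg->nesting_stack) {
      cg->nesting_stack = this;                // push on scope entry
    }

    NestedStatement::~NestedStatement() {
      assert(codegen->nesting_stack == this);  // strict LIFO, as ASSERTed
      codegen->nesting_stack = previous;       // pop on scope exit
    }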
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index e4bb925f0e..f3b2b0c50c 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -168,12 +168,6 @@ class GlobalHandles::Node : public Malloced {
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
- // Check that we are not passing a finalized external string to
- // the callback.
- ASSERT(!object_->IsExternalAsciiString() ||
- ExternalAsciiString::cast(object_)->resource() != NULL);
- ASSERT(!object_->IsExternalTwoByteString() ||
- ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState state(EXTERNAL);
func(object, par);
@@ -442,15 +436,15 @@ void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->near_death_global_handle_count = 0;
*stats->destroyed_global_handle_count = 0;
for (Node* current = head_; current != NULL; current = current->next()) {
- *stats->global_handle_count += 1;
+ *stats->global_handle_count++;
if (current->state_ == Node::WEAK) {
- *stats->weak_global_handle_count += 1;
+ *stats->weak_global_handle_count++;
} else if (current->state_ == Node::PENDING) {
- *stats->pending_global_handle_count += 1;
+ *stats->pending_global_handle_count++;
} else if (current->state_ == Node::NEAR_DEATH) {
- *stats->near_death_global_handle_count += 1;
+ *stats->near_death_global_handle_count++;
} else if (current->state_ == Node::DESTROYED) {
- *stats->destroyed_global_handle_count += 1;
+ *stats->destroyed_global_handle_count++;
}
}
}
@@ -513,4 +507,5 @@ void GlobalHandles::RemoveObjectGroups() {
object_groups->Clear();
}
+
} } // namespace v8::internal
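
One side effect of the revert is visible in RecordStats() above: `*stats->global_handle_count++` parses as `*(stats->global_handle_count++)` because postfix ++ binds tighter than unary *, so it advances the pointer and never bumps the counter; the explicit `+= 1` form being removed avoids that. A two-line demonstration:

    #include <cassert>

    int main() {
      int counters[2] = {0, 0};
      int* count = counters;
      *count++;                 // *(count++): moves the pointer, counter untouched
      assert(counters[0] == 0 && count == counters + 1);

      count = counters;
      *count += 1;              // dereference first, then increment the value
      assert(counters[0] == 1);
    }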
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index a2cd2e454f..ad0539f460 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -145,14 +145,6 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-// Desired alignment for maps.
-#if V8_HOST_ARCH_64_BIT
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
-#else
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
-#endif
-const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
-const intptr_t kMapAlignmentMask = kMapAlignment - 1;
// Tag information for Failure.
const int kFailureTag = 3;
@@ -182,11 +174,6 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
#endif
-// Number of bits to represent the page size for paged spaces. The value of 13
-// gives 8K bytes per page.
-const int kPageSizeBits = 13;
-
-
// Constants relevant to double precision floating point numbers.
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
@@ -307,7 +294,7 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
+enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
// A CodeDesc describes a buffer holding instructions and relocation
@@ -463,10 +450,6 @@ enum StateTag {
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
-#define MAP_SIZE_ALIGN(value) \
- (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
-
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
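
POINTER_SIZE_ALIGN and the deleted MAP_SIZE_ALIGN both use the standard round-up idiom: add (alignment - 1), then clear the low bits with the complement of the mask. It only works when the alignment is a power of two. In function form:

    #include <cassert>
    #include <cstdint>

    // Round value up to the next multiple of alignment (a power of two).
    constexpr intptr_t AlignUp(intptr_t value, intptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(AlignUp(13, 8) == 16);
      assert(AlignUp(16, 8) == 16);  // already-aligned values are unchanged
    }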
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 00fad547c5..eccd5ee2d3 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -109,19 +109,6 @@ Object* Heap::NumberFromUint32(uint32_t value) {
}
-void Heap::FinalizeExternalString(String* string) {
- ASSERT(string->IsExternalString());
- v8::String::ExternalStringResourceBase** resource_addr =
- reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) +
- ExternalString::kResourceOffset -
- kHeapObjectTag);
- delete *resource_addr;
- // Clear the resource pointer in the string.
- *resource_addr = NULL;
-}
-
-
Object* Heap::AllocateRawMap() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
@@ -129,12 +116,6 @@ Object* Heap::AllocateRawMap() {
#endif
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
-#ifdef DEBUG
- if (!result->IsFailure()) {
- // Maps have their own alignment.
- CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
- }
-#endif
return result;
}
@@ -340,56 +321,6 @@ inline bool Heap::allow_allocation(bool new_state) {
#endif
-void ExternalStringTable::AddString(String* string) {
- ASSERT(string->IsExternalString());
- if (Heap::InNewSpace(string)) {
- new_space_strings_.Add(string);
- } else {
- old_space_strings_.Add(string);
- }
-}
-
-
-void ExternalStringTable::Iterate(ObjectVisitor* v) {
- if (!new_space_strings_.is_empty()) {
- Object** start = &new_space_strings_[0];
- v->VisitPointers(start, start + new_space_strings_.length());
- }
- if (!old_space_strings_.is_empty()) {
- Object** start = &old_space_strings_[0];
- v->VisitPointers(start, start + old_space_strings_.length());
- }
-}
-
-
-// Verify() is inline to avoid ifdef-s around its calls in release
-// mode.
-void ExternalStringTable::Verify() {
-#ifdef DEBUG
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- ASSERT(Heap::InNewSpace(new_space_strings_[i]));
- ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
- }
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
- ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
- }
-#endif
-}
-
-
-void ExternalStringTable::AddOldString(String* string) {
- ASSERT(string->IsExternalString());
- ASSERT(!Heap::InNewSpace(string));
- old_space_strings_.Add(string);
-}
-
-
-void ExternalStringTable::ShrinkNewStrings(int position) {
- new_space_strings_.Rewind(position);
- Verify();
-}
-
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
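
FinalizeExternalString, removed above, reaches the string's resource field with raw address arithmetic: strip the heap-object tag from the pointer, add the field's byte offset, and reinterpret the result as a slot. A simplified sketch of that addressing (the tag and offset values here are illustrative assumptions):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;  // assumed low-bit tag on object pointers
    constexpr int kResourceOffset = 12;     // illustrative field offset in bytes

    // Compute the address of the field holding the external resource pointer.
    void** ResourceSlot(void* tagged_object) {
      uint8_t* raw = reinterpret_cast<uint8_t*>(tagged_object) - kHeapObjectTag;
      return reinterpret_cast<void**>(raw + kResourceOffset);
    }

    // Finalization then deletes *slot and nulls it, as in the removed code.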
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index b61505562b..bd1cd2d9cf 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -667,9 +667,8 @@ void ProducerHeapProfile::Setup() {
can_log_ = true;
}
-void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
- ASSERT(FLAG_log_producers);
- if (!can_log_) return;
+void ProducerHeapProfile::RecordJSObjectAllocation(Object* obj) {
+ if (!can_log_ || !FLAG_log_producers) return;
int framesCount = 0;
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
++framesCount;
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index c615942bf7..f8cb04dafc 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -261,12 +261,8 @@ class RetainerHeapProfile BASE_EMBEDDED {
class ProducerHeapProfile : public AllStatic {
public:
static void Setup();
- static void RecordJSObjectAllocation(Object* obj) {
- if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
- }
-
+ static void RecordJSObjectAllocation(Object* obj);
private:
- static void DoRecordJSObjectAllocation(Object* obj);
static bool can_log_;
};
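
The wrapper being deleted from heap-profiler.h illustrates a common fast-path split: the cheap flag test lives inline in the header so the usual "logging disabled" case costs one branch, while the expensive body stays out of line; the revert folds both into one out-of-line call. The pattern in isolation (names are stand-ins):

    #include <cstdio>

    static bool log_producers_enabled = false;  // stand-in for FLAG_log_producers

    static void DoRecordAllocation(const void* obj) {  // out-of-line slow path
      std::fprintf(stderr, "allocated %p\n", obj);
    }

    // Inline guard: when logging is disabled this is one load and branch,
    // with no call into the slow path.
    inline void RecordAllocation(const void* obj) {
      if (log_producers_enabled) DoRecordAllocation(obj);
    }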
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 7a66038b6d..4e4cd1c051 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -733,7 +733,7 @@ void Heap::Scavenge() {
ScavengeVisitor scavenge_visitor;
// Copy roots.
- IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ IterateRoots(&scavenge_visitor, VISIT_ALL);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
@@ -753,63 +753,6 @@ void Heap::Scavenge() {
}
}
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
- ScavengeExternalStringTable();
- ASSERT(new_space_front == new_space_.top());
-
- // Set age mark.
- new_space_.set_age_mark(new_space_.top());
-
- // Update how much has survived scavenge.
- survived_since_last_expansion_ +=
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
-
- LOG(ResourceEvent("scavenge", "end"));
-
- gc_state_ = NOT_IN_GC;
-}
-
-
-void Heap::ScavengeExternalStringTable() {
- ExternalStringTable::Verify();
-
- if (ExternalStringTable::new_space_strings_.is_empty()) return;
-
- Object** start = &ExternalStringTable::new_space_strings_[0];
- Object** end = start + ExternalStringTable::new_space_strings_.length();
- Object** last = start;
-
- for (Object** p = start; p < end; ++p) {
- ASSERT(Heap::InFromSpace(*p));
- MapWord first_word = HeapObject::cast(*p)->map_word();
-
- if (!first_word.IsForwardingAddress()) {
- // Unreachable external string can be finalized.
- FinalizeExternalString(String::cast(*p));
- continue;
- }
-
- // String is still reachable.
- String* target = String::cast(first_word.ToForwardingAddress());
- ASSERT(target->IsExternalString());
-
- if (Heap::InNewSpace(target)) {
- // String is still in new space. Update the table entry.
- *last = target;
- ++last;
- } else {
- // String got promoted. Move it to the old string list.
- ExternalStringTable::AddOldString(target);
- }
- }
-
- ExternalStringTable::ShrinkNewStrings(last - start);
-}
-
-
-Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front) {
do {
ASSERT(new_space_front <= new_space_.top());
@@ -818,7 +761,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- object->Iterate(scavenge_visitor);
+ object->Iterate(&scavenge_visitor);
new_space_front += object->Size();
}
@@ -840,7 +783,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
- target->Iterate(scavenge_visitor);
+ target->Iterate(&scavenge_visitor);
UpdateRSet(target);
}
@@ -848,7 +791,16 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// (there are currently no more unswept promoted objects).
} while (new_space_front < new_space_.top());
- return new_space_front;
+ // Set age mark.
+ new_space_.set_age_mark(new_space_.top());
+
+ // Update how much has survived scavenge.
+ survived_since_last_expansion_ +=
+ (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
+ LOG(ResourceEvent("scavenge", "end"));
+
+ gc_state_ = NOT_IN_GC;
}
@@ -1142,13 +1094,6 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
-
- // If the map object is aligned fill the padding area with Smi 0 objects.
- if (Map::kPadStart < Map::kSize) {
- memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
- 0,
- Map::kSize - Map::kPadStart);
- }
return map;
}
@@ -2240,11 +2185,8 @@ Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
Object* Heap::AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure) {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- Object* result = Allocate(function_map, space);
+ Object* prototype) {
+ Object* result = Allocate(function_map, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
return InitializeFunction(JSFunction::cast(result), shared, prototype);
}
@@ -2261,14 +2203,10 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject* boilerplate =
Top::context()->global_context()->arguments_boilerplate();
- // Check that the size of the boilerplate matches our
- // expectations. The ArgumentsAccessStub::GenerateNewObject relies
- // on the size being a known constant.
- ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
-
- // Do the allocation.
- Object* result =
- AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
+ // Make the clone.
+ Map* map = boilerplate->map();
+ int object_size = map->instance_size();
+ Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
// Copy the content. The arguments boilerplate doesn't have any
@@ -2276,7 +2214,7 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// barrier here.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
- kArgumentsObjectSize);
+ object_size);
// Set the two properties.
JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
@@ -3237,11 +3175,6 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
- if (mode != VISIT_ALL_IN_SCAVENGE) {
- // Scavenge collections have special processing for this.
- ExternalStringTable::Iterate(v);
- }
- v->Synchronize("external_string_table");
}
@@ -3270,12 +3203,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
HandleScopeImplementer::Iterate(v);
v->Synchronize("handlescope");
- // Iterate over the builtin code objects and code stubs in the
- // heap. Note that it is not necessary to iterate over code objects
- // on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
- Builtins::IterateBuiltins(v);
- }
+ // Iterate over the builtin code objects and code stubs in the heap. Note
+ // that it is not strictly necessary to iterate over code objects on
+ // scavenge collections. We still do it here because this same function
+ // is used by the mark-sweep collector and the deserializer.
+ Builtins::IterateBuiltins(v);
v->Synchronize("builtins");
// Iterate over global handles.
@@ -3492,8 +3424,6 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
GlobalHandles::TearDown();
- ExternalStringTable::TearDown();
-
new_space_.TearDown();
if (old_pointer_space_ != NULL) {
@@ -3909,8 +3839,8 @@ class MarkRootVisitor: public ObjectVisitor {
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
- search_target = target;
+void Heap::TracePathToObject() {
+ search_target = NULL;
search_for_any_global = false;
MarkRootVisitor root_visitor;
@@ -3977,8 +3907,8 @@ const char* GCTracer::CollectorString() {
int KeyedLookupCache::Hash(Map* map, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
- return (addr_hash ^ name->Hash()) & kCapacityMask;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
+ return (addr_hash ^ name->Hash()) % kLength;
}
@@ -4061,35 +3991,4 @@ void TranscendentalCache::Clear() {
}
-void ExternalStringTable::CleanUp() {
- int last = 0;
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
- if (Heap::InNewSpace(new_space_strings_[i])) {
- new_space_strings_[last++] = new_space_strings_[i];
- } else {
- old_space_strings_.Add(new_space_strings_[i]);
- }
- }
- new_space_strings_.Rewind(last);
- last = 0;
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
- ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
- old_space_strings_[last++] = old_space_strings_[i];
- }
- old_space_strings_.Rewind(last);
- Verify();
-}
-
-
-void ExternalStringTable::TearDown() {
- new_space_strings_.Free();
- old_space_strings_.Free();
-}
-
-
-List<Object*> ExternalStringTable::new_space_strings_;
-List<Object*> ExternalStringTable::old_space_strings_;
-
} } // namespace v8::internal
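
ExternalStringTable::CleanUp, deleted above, compacts each list in place: survivors are copied down over dead entries and the tail is cut off with Rewind, the same shape as the erase-remove idiom. A sketch over a std::vector:

    #include <vector>

    // Keep survivors (non-null entries) packed at the front; discard the rest.
    void CleanUp(std::vector<const void*>& strings) {
      size_t last = 0;
      for (size_t i = 0; i < strings.size(); ++i) {
        if (strings[i] == nullptr) continue;  // collected entry, drop it
        strings[last++] = strings[i];         // copy survivor down
      }
      strings.resize(last);                   // Rewind(last) in the original
    }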
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index fdc04a8ea9..b37fe4b5b8 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -487,12 +487,9 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure = TENURED);
+ Object* prototype);
// Indices for direct access into argument objects.
- static const int kArgumentsObjectSize =
- JSObject::kHeaderSize + 2 * kPointerSize;
static const int arguments_callee_index = 0;
static const int arguments_length_index = 1;
@@ -569,10 +566,6 @@ class Heap : public AllStatic {
static Object* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
- // Finalizes an external string by deleting the associated external
- // data and clearing the resource pointer.
- static inline void FinalizeExternalString(String* string);
-
// Allocates an uninitialized object. The memory is non-executable if the
// hardware and OS allow.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -785,7 +778,7 @@ class Heap : public AllStatic {
return disallow_allocation_failure_;
}
- static void TracePathToObject(Object* target);
+ static void TracePathToObject();
static void TracePathToGlobal();
#endif
@@ -893,7 +886,7 @@ class Heap : public AllStatic {
// The number of MapSpace pages is limited by the way we pack
// Map pointers during GC.
static const int kMaxMapSpaceSize =
- (1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;
+ (1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
@@ -1046,9 +1039,6 @@ class Heap : public AllStatic {
// Performs a minor collection in new generation.
static void Scavenge();
- static void ScavengeExternalStringTable();
- static Address DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front);
// Performs a major collection in the whole heap.
static void MarkCompact(GCTracer* tracer);
@@ -1303,35 +1293,19 @@ class KeyedLookupCache {
// Clear the cache.
static void Clear();
-
- static const int kLength = 64;
- static const int kCapacityMask = kLength - 1;
- static const int kMapHashShift = 2;
-
private:
static inline int Hash(Map* map, String* name);
-
- // Get the address of the keys and field_offsets arrays. Used in
- // generated code to perform cache lookups.
- static Address keys_address() {
- return reinterpret_cast<Address>(&keys_);
- }
-
- static Address field_offsets_address() {
- return reinterpret_cast<Address>(&field_offsets_);
- }
-
+ static const int kLength = 64;
struct Key {
Map* map;
String* name;
};
static Key keys_[kLength];
static int field_offsets_[kLength];
-
- friend class ExternalReference;
};
+
// Cache for mapping (array, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
@@ -1649,39 +1623,6 @@ class TranscendentalCache {
};
-// External strings table is a place where all external strings are
-// registered. We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable : public AllStatic {
- public:
- // Registers an external string.
- inline static void AddString(String* string);
-
- inline static void Iterate(ObjectVisitor* v);
-
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- static void CleanUp();
-
- // Destroys all allocated memory.
- static void TearDown();
-
- private:
- friend class Heap;
-
- inline static void Verify();
-
- inline static void AddOldString(String* string);
-
- // Notifies the table that only a prefix of the new list is valid.
- inline static void ShrinkNewStrings(int position);
-
- // To speed up scavenge collections new space strings are kept
- // separate from old space strings.
- static List<Object*> new_space_strings_;
- static List<Object*> old_space_strings_;
-};
-
} } // namespace v8::internal
#endif // V8_HEAP_H_
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index a0236d0264..d6f555082a 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2004,17 +2004,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 21fa0d5a00..662ebc9022 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -745,7 +745,6 @@ class Assembler : public Malloced {
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index f4dd2f931d..a164cfa85c 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -472,38 +472,35 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&done);
}
- // 4. Check that the function really is a function.
- { Label done;
- __ test(edi, Operand(edi));
- __ j(not_zero, &done, taken);
- __ xor_(ebx, Operand(ebx));
- // CALL_NON_FUNCTION will expect to find the non-function callee on the
- // expression stack of the caller. Transfer it from receiver to the
- // caller's expression stack (and make the first argument the receiver
- // for CALL_NON_FUNCTION) by decrementing the argument count.
- __ dec(eax);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
- __ bind(&done);
- }
-
- // 5. Shift arguments and return address one slot down on the stack
- // (overwriting the receiver).
+ // 4. Shift stuff one slot down the stack.
{ Label loop;
- __ mov(ecx, eax);
+ __ lea(ecx, Operand(eax, +1)); // +1 ~ copy receiver too
__ bind(&loop);
__ mov(ebx, Operand(esp, ecx, times_4, 0));
__ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
__ dec(ecx);
- __ j(not_sign, &loop);
- __ pop(ebx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
+ __ j(not_zero, &loop);
}
- // 6. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing.
- { __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ // 5. Remove TOS (copy of the last argument), but keep return address.
+ __ pop(ebx);
+ __ pop(ecx);
+ __ push(ebx);
+ __ dec(eax);
+
+ // 6. Check that function really was a function and get the code to
+ // call from the function and check that the number of expected
+ // arguments matches what we're providing.
+ { Label invoke;
+ __ test(edi, Operand(edi));
+ __ j(not_zero, &invoke, taken);
+ __ xor_(ebx, Operand(ebx));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&invoke);
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
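
In step 4 above, the reverted code shifts argc + 1 stack words (the arguments plus the receiver) one slot toward higher addresses, overwriting the function name pushed beneath the receiver; step 5 then pops the duplicated top word while keeping the return address. A rough C++ analogue of the mov/dec/jnz loop, indexing in words from the stack pointer (illustrative only):

#include <cstdint>

// stack[0] is the return address; stack[1..count] are the words to shift.
// Descending i, each word is copied one slot deeper, so nothing is clobbered
// before it is read.
void ShiftDownByOne(intptr_t* stack, int count) {  // count == argc + 1
  for (int i = count; i >= 1; --i) {
    stack[i + 1] = stack[i];
  }
}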
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 72979c6794..7c8ff31f60 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -174,19 +174,12 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_is_shadowed_ = false;
// Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
+ if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
// Get outer context and create a new context based on it.
frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
// Update context local.
frame_->SaveContextRegister();
@@ -770,27 +763,19 @@ class FloatingPointHelper : public AllStatic {
const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "");
- return name_;
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
}
@@ -818,88 +803,14 @@ class DeferredInlineBinaryOperation: public DeferredCode {
void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
- (op_ ==Token::SUB) ||
- (op_ == Token::MUL) ||
- (op_ == Token::DIV))) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label call_runtime, after_alloc_failure;
- Label left_smi, right_smi, load_right, do_op;
- __ test(left_, Immediate(kSmiTagMask));
- __ j(zero, &left_smi);
- __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- __ sar(left_, 1);
- __ cvtsi2sd(xmm0, Operand(left_));
- __ shl(left_, 1);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&load_right);
- __ test(right_, Immediate(kSmiTagMask));
- __ j(zero, &right_smi);
- __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- __ sar(right_, 1);
- __ cvtsi2sd(xmm1, Operand(right_));
- __ shl(right_, 1);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
-
- __ bind(&after_alloc_failure);
- __ pop(left_);
- __ bind(&call_runtime);
- }
GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
- __ bind(&done);
}
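
The deleted SSE2 fast path above leans on ia32 smi tagging (tag bit 0, payload shifted left by one): sar by one untags, cvtsi2sd converts to double, and shl restores the tagged value in the register. The arithmetic as a standalone sketch:

#include <cstdint>

// ia32 smis store value << 1 with a zero tag bit.
double SmiToDouble(int32_t smi) {
  int32_t value = smi >> 1;            // sar reg, 1: untag
  return static_cast<double>(value);   // cvtsi2sd xmm, reg
  // the generated code then does shl reg, 1 to re-tag the register in place
}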
void CodeGenerator::GenericBinaryOperation(Token::Value op,
- StaticType* type,
+ SmiAnalysis* type,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@@ -1580,7 +1491,7 @@ void DeferredInlineSmiSub::Generate() {
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
- StaticType* type,
+ SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@@ -1865,8 +1776,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
+void CodeGenerator::Comparison(Condition cc,
bool strict,
ControlDestination* dest) {
// Strict only makes sense for equality comparisons.
@@ -1913,8 +1823,7 @@ void CodeGenerator::Comparison(AstNode* node,
default:
UNREACHABLE();
}
- } else {
- // Only one side is a constant Smi.
+ } else { // Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
@@ -1928,8 +1837,6 @@ void CodeGenerator::Comparison(AstNode* node,
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
@@ -1937,50 +1844,11 @@ void CodeGenerator::Comparison(AstNode* node,
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
- bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
- && node->AsCompareOperation()->is_for_loop_condition();
- if (!is_for_loop_compare
- && CpuFeatures::IsSupported(SSE2)
- && right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_number.Branch(not_equal, &left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
- }
- __ comisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- Condition double_cc = cc;
- switch (cc) {
- case less: double_cc = below; break;
- case equal: double_cc = equal; break;
- case less_equal: double_cc = below_equal; break;
- case greater: double_cc = above; break;
- case greater_equal: double_cc = above_equal; break;
- default: UNREACHABLE();
- }
- dest->true_target()->Branch(double_cc);
- dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
-
// Setup and call the compare stub.
CompareStub stub(cc, strict);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
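
The deleted condition-translation switch exists because comisd sets the flags like an unsigned integer compare, so the signed conditions chosen for smi comparisons must be remapped before branching on doubles. The mapping from the deleted code, restated as a sketch:

enum Cond { kLess, kEqual, kLessEqual, kGreater, kGreaterEqual,
            kBelow, kBelowEqual, kAbove, kAboveEqual };

// Signed condition -> unsigned/FP condition after comisd.
Cond DoubleCondition(Cond cc) {
  switch (cc) {
    case kLess:         return kBelow;
    case kEqual:        return kEqual;
    case kLessEqual:    return kBelowEqual;
    case kGreater:      return kAbove;
    case kGreaterEqual: return kAboveEqual;
    default:            return cc;  // unreachable in the deleted code
  }
}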
@@ -2004,7 +1872,6 @@ void CodeGenerator::Comparison(AstNode* node,
right_side.Unuse();
dest->Split(cc);
}
-
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@@ -2041,8 +1908,7 @@ void CodeGenerator::Comparison(AstNode* node,
operand.Unuse();
dest->Split(not_zero);
}
- } else {
- // Neither side is a constant Smi or null.
+ } else { // Neither side is a constant Smi or null.
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
@@ -2709,7 +2575,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compare and branch to the body if true or the next test if
// false. Prefer the next test as a fall through.
ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
+ Comparison(equal, true, &dest);
// If the comparison fell through to the true target, jump to the
// actual body.
@@ -3719,28 +3585,18 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ // Call the runtime to instantiate the function boilerplate object.
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
ASSERT(boilerplate->IsBoilerplate());
+ frame_->SyncRange(0, frame_->element_count() - 1);
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
- FastNewClosureStub stub;
- frame_->Push(boilerplate);
- Result answer = frame_->CallStub(&stub, 1);
- frame_->Push(&answer);
- } else {
- // Call the runtime to instantiate the function boilerplate
- // object. The inevitable call will sync frame elements to memory
- // anyway, so we do it eagerly to allow us to push the arguments
- // directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Create a new closure.
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(boilerplate));
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(&result);
- }
+ // Create a new closure.
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(boilerplate));
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
}
@@ -4439,23 +4295,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Push the resulting array literal boilerplate on the stack.
frame_->Push(&boilerplate);
-
// Clone the boilerplate object.
- int length = node->values()->length();
- Result clone;
- if (node->depth() == 1 &&
- length <= FastCloneShallowArrayStub::kMaximumLength) {
- FastCloneShallowArrayStub stub(length);
- clone = frame_->CallStub(&stub, 1);
- } else {
- clone = frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
}
+ Result clone = frame_->CallRuntime(clone_function_id, 1);
// Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
// literals.
- for (int i = 0; i < length; i++) {
+ for (int i = 0; i < node->values()->length(); i++) {
Expression* value = node->values()->at(i);
// If value is a literal the property value is already set in the
@@ -4684,6 +4535,9 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(var->name());
+
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
@@ -4695,16 +4549,14 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
- // Push the name of the function onto the frame.
- frame_->Push(var->name());
-
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
- frame_->Push(&result);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
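
The reverted calling convention keeps the frame height balanced: the function name goes on the stack before the receiver and arguments, and after the IC call has consumed the receiver and arguments the name is left on top, where SetElementAt(0, &result) overwrites it with the result. Layout sketch (deepest slot first):

//   [ function name ]   <- pushed first; replaced by the call result
//   [ receiver      ]
//   [ arg 0 .. arg n-1 ]
// Pushing the result instead (as the non-reverted code did) would leave one
// extra word on the frame, which is why the revert restores SetElementAt.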
@@ -4757,7 +4609,8 @@ void CodeGenerator::VisitCall(Call* node) {
node->position());
} else {
- // Push the receiver onto the frame.
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(name);
Load(property->obj());
// Load the arguments.
@@ -4766,16 +4619,14 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result =
frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
loop_nesting());
frame_->RestoreContextRegister();
- frame_->Push(&result);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
}
} else {
@@ -5433,6 +5284,8 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
Runtime::Function* function = node->function();
if (function == NULL) {
+ // Prepare stack for calling JS runtime function.
+ frame_->Push(node->name());
// Push the builtins object found in the current global object.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@@ -5449,12 +5302,11 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
- frame_->Push(node->name());
Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting_);
frame_->RestoreContextRegister();
- frame_->Push(&answer);
+ frame_->SetElementAt(0, &answer);
} else {
// Call the C runtime function.
Result answer = frame_->CallRuntime(function, arg_count);
@@ -6122,7 +5974,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
Load(left);
Load(right);
- Comparison(node, cc, strict, destination());
+ Comparison(cc, strict, destination());
}
@@ -6576,7 +6428,7 @@ void Reference::SetValue(InitState init_state) {
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
- StaticType* key_smi_analysis = property->key()->type();
+ SmiAnalysis* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@@ -6677,133 +6529,6 @@ void Reference::SetValue(InitState init_state) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Clone the boilerplate in new space. Set the context to the
- // current context in esi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the boilerplate function from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-
- // Clone the rest of the boilerplate fields. We don't have to update
- // the write barrier because the allocated object is in new space.
- for (int offset = kPointerSize;
- offset < JSFunction::kSize;
- offset += kPointerSize) {
- if (offset == JSFunction::kContextOffset) {
- __ mov(FieldOperand(eax, offset), esi);
- } else {
- __ mov(ebx, FieldOperand(edx, offset));
- __ mov(FieldOperand(eax, offset), ebx);
- }
- }
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
-}
-
-
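
The &gc fallback in the deleted FastNewClosureStub rewrites the stub's one-argument frame into the runtime call's two-argument frame without returning: the return address is popped aside, the context is slipped under the boilerplate, and the return address goes back on top before the tail call. As a stack picture (top of stack first):

// before:  ret addr | boilerplate
// after:   ret addr | boilerplate | context (esi)
// i.e.  pop ecx (ret); pop edx (boilerplate); push esi; push edx; push ecx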
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Setup the object header.
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
-
- // Setup the fixed slots.
- __ xor_(ebx, Operand(ebx)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the surrounding context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, Factory::undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- Label gc;
- __ AllocateInNewSpace(size, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the boilerplate from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- }
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&gc);
- ExternalReference runtime(Runtime::kCloneShallowLiteralBoilerplate);
- __ TailCallRuntime(runtime, 1, 1);
-}
-
-
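
The deleted FastCloneShallowArrayStub used a single new-space allocation for the JSArray header plus its elements, then pointed kElementsOffset just past the header, trading a second limit check for one larger bump. A rough sketch with malloc standing in for the new-space bump allocator (maps, tagging, and write barriers omitted):

#include <cstdlib>

// Returns the array block; *out_elements points at the co-allocated elements.
void* AllocateArrayAndElements(size_t js_array_size, size_t elements_size,
                               void** out_elements) {
  char* block = static_cast<char*>(std::malloc(js_array_size + elements_size));
  *out_elements = (elements_size > 0) ? block + js_array_size : nullptr;
  return block;
}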
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
@@ -7716,90 +7441,18 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
static const int kDisplacement = 2 * kPointerSize;
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
+ Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate);
+ __ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
__ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
__ mov(Operand(esp, 2 * kPointerSize), edx);
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
- __ j(zero, &add_arguments_object);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, Operand(ecx));
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack and untag the length.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ sar(ecx, kSmiTagSize);
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
- __ dec(ecx);
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
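
The deleted try_allocate block sizes the elements array straight from the smi-tagged length: on ia32 a smi stores length << 1, so lea ecx, [ecx*2 + FixedArray::kHeaderSize] computes length * kPointerSize + header in one instruction. The same arithmetic in plain C++ (the header size here is assumed for the sketch, not taken from V8):

#include <cstdint>

int32_t ElementsSizeFromSmi(int32_t smi_length) {
  const int32_t kHeaderSize = 8;  // assumed FixedArray header size
  // smi_length == length << 1, so smi_length * 2 == length * 4 (kPointerSize)
  return smi_length * 2 + kHeaderSize;
}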
@@ -8653,7 +8306,6 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, Operand(edi));
- ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
// Allocate an ascii cons string.
@@ -8696,7 +8348,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii_string_add_flat_result;
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- ASSERT(kStringEncodingMask == kAsciiStringTag);
+ ASSERT(kAsciiStringTag != 0);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 3d17c96ad0..11a5163db8 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -434,7 +434,7 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
- StaticType* type,
+ SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@@ -447,7 +447,7 @@ class CodeGenerator: public AstVisitor {
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
- StaticType* type,
+ SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
@@ -459,8 +459,7 @@ class CodeGenerator: public AstVisitor {
Result* right,
OverwriteMode overwrite_mode);
- void Comparison(AstNode* node,
- Condition cc,
+ void Comparison(Condition cc,
bool strict,
ControlDestination* destination);
@@ -666,8 +665,7 @@ class GenericBinaryOpStub: public CodeStub {
mode_(mode),
flags_(flags),
args_in_registers_(false),
- args_reversed_(false),
- name_(NULL) {
+ args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -686,7 +684,6 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
- char* name_;
const char* GetName();
@@ -728,8 +725,8 @@ class GenericBinaryOpStub: public CodeStub {
bool ArgsInRegistersSupported() {
return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ || (op_ == Token::MUL) || (op_ == Token::DIV))
+ && flags_ != NO_SMI_CODE_IN_STUB;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -763,11 +760,11 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
+ Register desc,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
// Should the stub check whether arguments are strings?
bool string_check_;
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 375cbdf7eb..df5a28a54b 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1049,14 +1049,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (*data == 0x57) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index 46524d7dce..c5d544127a 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -412,24 +412,46 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ mov(Operand(ebp, SlotOffset(var->slot())),
- Immediate(Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(Operand(ebp, SlotOffset(var->slot())));
- }
- break;
-
- case Slot::CONTEXT:
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ ASSERT(slot != NULL); // No global declarations here.
+
+ // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
+ switch (slot->type()) {
+ case Slot::LOOKUP: {
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ push(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ push(Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ } else {
+ __ push(Immediate(Smi::FromInt(0))); // No initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ mov(Operand(ebp, SlotOffset(var->slot())),
+ Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(Operand(ebp, SlotOffset(var->slot())));
+ }
+ break;
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
+ if (decl->mode() == Variable::CONST) {
+ __ mov(eax, Immediate(Factory::the_hole_value()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ mov(ebx,
@@ -437,70 +459,26 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
}
- if (decl->mode() == Variable::CONST) {
- __ mov(eax, Immediate(Factory::the_hole_value()));
- __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
- // No write barrier since the hole value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(eax);
- __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
- int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(esi, offset, eax, ecx);
- }
- break;
-
- case Slot::LOOKUP: {
- __ push(esi);
- __ push(Immediate(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ push(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ push(Immediate(Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- } else {
- __ push(Immediate(Smi::FromInt(0))); // No initial value!
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ // No write barrier since the_hole_value is in old space.
+ ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(eax);
- } else {
- __ Set(eax, Immediate(Factory::the_hole_value()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ mov(ebx,
+ CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
+ __ cmp(ebx, Operand(esi));
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ int offset = Context::SlotOffset(slot->index());
+ __ RecordWrite(esi, offset, eax, ecx);
}
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Absence of a test eax instruction following the call
- // indicates that none of the load was inlined.
-
- // Value in eax is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- }
+ break;
+ default:
+ UNREACHABLE();
}
}
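
For LOOKUP slots the new code defers entirely to the runtime; the four pushed arguments are, in order:

// esi              -- the current context
// var->name()      -- the variable name
// Smi(attributes)  -- NONE for VAR, READ_ONLY for CONST
// initial value    -- the hole for CONST, the closure for functions,
//                     Smi(0) as a "no initial value" marker otherwise
// __ CallRuntime(Runtime::kDeclareContextSlot, 4);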
@@ -515,6 +493,20 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ Expression* expr = stmt->expression();
+ if (expr->AsLiteral() != NULL) {
+ __ mov(eax, expr->AsLiteral()->handle());
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(eax);
+ }
+ EmitReturnSequence(stmt->statement_pos());
+}
+
+
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -535,20 +527,14 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
+ Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
- ASSERT(var->is_global());
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
- __ mov(ecx, var->name());
+ __ mov(ecx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// By emitting a nop we make sure that we do not have a test eax
@@ -556,7 +542,8 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Remember that the assembler may choose to do peephole optimization
// (eg, push/pop elimination).
__ nop();
- DropAndMove(context, eax);
+
+ DropAndMove(expr->context(), eax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@@ -577,7 +564,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
UNREACHABLE();
}
}
- Move(context, slot, eax);
+ Move(expr->context(), slot, eax);
} else {
Comment cmnt(masm_, "Variable rewritten to Property");
// A variable has been rewritten into an explicit access to
@@ -611,8 +598,9 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Notice: We must not have a "test eax, ..." instruction after
// the call. It is treated specially by the LoadIC code.
__ nop();
- // Drop key and object left on the stack by IC.
- DropAndMove(context, eax, 2);
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), eax, 2);
}
}
@@ -646,14 +634,35 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Label exists;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = boilerplate
+
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(ebx);
+ // Literal index (1).
__ push(Immediate(Smi::FromInt(expr->literal_index())));
+ // Constant properties (2).
__ push(Immediate(expr->constant_properties()));
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&exists);
+ // eax contains boilerplate.
+ // Clone boilerplate.
+ __ push(eax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
}
// If result_saved == true: The result is saved on top of the
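
The reverted object-literal path is lazy-initialize-then-clone: the function's literals array caches the boilerplate, an undefined slot means it has not been built yet, and every evaluation clones the cached object (shallow for depth 1, deep otherwise). A hedged sketch, with nullptr standing in for the undefined sentinel and illustrative names rather than V8's API:

void* ObjectLiteralValue(void** literals, int literal_index,
                         void* (*create_boilerplate)(),
                         void* (*clone)(void*)) {
  if (literals[literal_index] == nullptr) {
    literals[literal_index] = create_boilerplate();  // built once, then cached
  }
  return clone(literals[literal_index]);             // fresh object per eval
}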
@@ -749,14 +758,31 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ Label make_clone;
+
+ // Fetch the function's literals array.
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ // Check if the literal's boilerplate has been instantiated.
+ int offset =
+ FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+ __ mov(eax, FieldOperand(ebx, offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &make_clone);
+
+ // Instantiate the boilerplate.
+ __ push(ebx);
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->literals()));
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+ __ bind(&make_clone);
+ // Clone the boilerplate.
+ __ push(eax);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
bool result_saved = false; // Is the result saved to the stack?
@@ -826,37 +852,10 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- Literal* key = prop->key()->AsLiteral();
- __ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- Move(context, eax);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- Move(context, eax);
-}
-
-
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Move(context, eax);
-}
-
-
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
+
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
@@ -961,6 +960,35 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
+ } else {
+ Property* property = var->rewrite()->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Load object and key onto the stack.
+ Slot* object_slot = property->obj()->AsSlot();
+ ASSERT_NOT_NULL(object_slot);
+ Move(Expression::kValue, object_slot, eax);
+
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ Move(Expression::kValue, key_literal);
+
+ // Value to store was pushed before object and key on the stack.
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+
+ // Arguments to the IC are: value in eax, object and key on stack.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+
+ if (expr->context() == Expression::kEffect) {
+ __ add(Operand(esp), Immediate(3 * kPointerSize));
+ } else if (expr->context() == Expression::kValue) {
+ // Value is still on the stack in esp[2 * kPointerSize]
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ } else {
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ DropAndMove(expr->context(), eax, 3);
+ }
}
}
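
The added keyed-store branch leaves value, object, and key stacked (value pushed first, so it sits at esp[2 * kPointerSize]); what survives the IC call depends on the expression context:

// kEffect: result unused       -> drop value, object, and key (3 words)
// kValue:  value is the result -> drop object and key, leaving the value
// other:   reload the value to eax, then DropAndMove discards all three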
@@ -1066,9 +1094,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1076,15 +1102,16 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
Visit(args->at(i));
ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- __ Set(ecx, Immediate(name));
- // Record source position of the IC call.
+ // Record source position for debugger.
SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, mode);
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, reloc_info);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- Move(expr->context(), eax);
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), eax);
}
@@ -1101,6 +1128,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
DropAndMove(expr->context(), eax);
}
@@ -1114,9 +1142,11 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Call to the identifier 'eval'.
UNREACHABLE();
} else if (var != NULL && !var->is_this() && var->is_global()) {
- // Push global object as receiver for the call IC.
+ // Call to a global variable.
+ __ push(Immediate(var->name()));
+ // Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1127,8 +1157,9 @@ void FastCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
+ __ push(Immediate(key->handle()));
Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1220,6 +1251,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
+ __ push(Immediate(expr->name()));
__ mov(eax, CodeGenerator::GlobalObject());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
}
@@ -1232,18 +1264,19 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
__ call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), eax);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), eax);
}
- Move(expr->context(), eax);
}
@@ -1652,65 +1685,7 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), eax);
-}
-
-
-Register FastCodeGenerator::result_register() { return eax; }
-
-
-Register FastCodeGenerator::context_register() { return esi; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(Operand(ebp, frame_offset), value);
-}
-
-
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FastCodeGenerator::EnterFinallyBlock() {
- // Cook return address on top of stack (smi encoded Code* delta)
- ASSERT(!result_register().is(edx));
- __ mov(edx, Operand(esp, 0));
- __ sub(Operand(edx), Immediate(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(edx, Operand(edx)); // Convert to smi.
- __ mov(Operand(esp, 0), edx);
- // Store result register while executing finally block.
- __ push(result_register());
-}
-
-
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(edx));
- // Restore result register from stack.
- __ pop(result_register());
- // Uncook return address.
- __ mov(edx, Operand(esp, 0));
- __ sar(edx, 1); // Convert smi to int.
- __ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ mov(Operand(esp, 0), edx);
- // And return.
- __ ret(0);
-}
-
-
-void FastCodeGenerator::ThrowException() {
- __ push(result_register());
- __ CallRuntime(Runtime::kThrow, 1);
-}
-
#undef __
+
} } // namespace v8::internal
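
The deleted EnterFinallyBlock/ExitFinallyBlock pair "cooks" the on-stack return address while a finally block runs: the address is stored as a smi-encoded delta from the code object's start, so a moving GC can relocate the code object without corrupting the saved address. The round trip, as a sketch:

#include <cstdint>

intptr_t Cook(intptr_t return_address, intptr_t code_object) {
  return (return_address - code_object) << 1;  // sub, then smi-encode (tag 0)
}

intptr_t Uncook(intptr_t cooked, intptr_t code_object) {
  return (cooked >> 1) + code_object;          // sar 1, then add CodeObject
}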
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 58fe2dc994..6988fe09f6 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -48,13 +48,9 @@ namespace internal {
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register r0,
- Register r1,
- Register r2,
- Register name,
- DictionaryCheck check_dictionary) {
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register r0, Register r1, Register r2,
+ Register name) {
// Register use:
//
// r0 - used to hold the property dictionary.
@@ -90,15 +86,11 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
- // Load properties array.
- __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
-
// Check that the properties array is a dictionary.
- if (check_dictionary == CHECK_DICTIONARY) {
- __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(not_equal, miss_label);
- }
+ __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, miss_label);
// Compute the capacity mask.
const int kCapacityOffset =
@@ -231,8 +223,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
- Label slow, check_string, index_int, index_string;
- Label check_pixel_array, probe_dictionary;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ mov(eax, Operand(esp, kPointerSize));
@@ -311,72 +302,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
- // Is the string a symbol?
+ // If the string is a symbol, do a quick inline probe of the receiver's
+ // dictionary, if it exists.
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in ecx.
- __ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, ebx);
- __ shr(edx, KeyedLookupCache::kMapHashShift);
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ shr(eax, String::kHashShift);
- __ xor_(edx, Operand(eax));
- __ and_(edx, KeyedLookupCache::kCapacityMask);
-
- // Load the key (consisting of map and symbol) from the cache and
- // check for match.
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys();
- __ mov(edi, edx);
- __ shl(edi, kPointerSizeLog2 + 1);
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
- __ cmp(edi, Operand(esp, kPointerSize));
- __ j(not_equal, &slow);
-
- // Get field offset and check that it is an in-object property.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets();
- __ mov(eax,
- Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
- __ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ cmp(eax, Operand(edx));
- __ j(above_equal, &slow);
-
- // Load in-object property.
- __ sub(eax, Operand(edx));
- __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, Operand(edx));
- __ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- GenerateDictionaryLoad(masm,
- &slow,
- ebx,
- ecx,
- edx,
- eax,
- DICTIONARY_CHECK_DONE);
+ // Probe the dictionary leaving result in ecx.
+ GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
-
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
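The probe removed above hashes the receiver's map together with the symbol's hash field and masks the result into the cache. A hedged C++ sketch of the same computation — the real shift and capacity constants live in KeyedLookupCache in the 2.0.5 tree; the values below are illustrative assumptions:

    #include <stdint.h>
    // Illustrative stand-ins; KeyedLookupCache defines the real values.
    static const uint32_t kCapacityMask = 64 - 1;   // assumed capacity of 64
    static const int kMapHashShift = 2;             // assumed
    static const int kStringHashShift = 2;          // assumed
    // Index into the parallel keys/field_offsets arrays, mirroring the
    // shr/shr/xor/and sequence in the removed assembly.
    static uint32_t Probe(uintptr_t map_word, uint32_t string_hash_field) {
      uint32_t h = (static_cast<uint32_t>(map_word) >> kMapHashShift) ^
                   (string_hash_field >> kStringHashShift);
      return h & kCapacityMask;
    }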
@@ -888,16 +824,13 @@ Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack; 2 ~ return address, receiver.
+ __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
@@ -943,7 +876,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -951,34 +884,27 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Search dictionary - put result in register edi.
- __ mov(edi, edx);
- GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
+ // Search dictionary - put result in register edx.
+ GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
- // Check that the result is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
+ // Move the result to register edi and check that it isn't a smi.
+ __ mov(edi, Operand(edx));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
- // Check that the value is a JavaScript function, fetching its map into eax.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+ // Check that the value is a JavaScript function.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
__ j(not_equal, miss, not_taken);
- // Check that the function has been loaded. eax holds function's map.
- __ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
- __ test(eax, Immediate(1 << Map::kNeedsLoading));
+ // Check that the function has been loaded.
+ __ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
+ __ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
+ __ test(edx, Immediate(1 << Map::kNeedsLoading));
__ j(not_zero, miss, not_taken);
- // Patch the receiver on stack with the global proxy if necessary.
+ // Patch the receiver with the global proxy if necessary.
if (is_global_object) {
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
@@ -991,17 +917,14 @@ static void GenerateNormalHelper(MacroAssembler* masm,
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack; 2 ~ return address, receiver.
+ __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@@ -1050,33 +973,33 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ const ExternalReference& f) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ // Get the name of the function to call from the stack.
+ // 2 ~ receiver, return address.
+ __ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
// Enter an internal frame.
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ push(edx);
- __ push(ecx);
+ __ push(ebx);
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ mov(ebx, Immediate(f));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@@ -1088,11 +1011,11 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &invoke, not_taken);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, &global);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
+ __ cmp(ecx, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &invoke);
// Patch the receiver on the stack.
@@ -1165,7 +1088,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in eax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
__ ret(0);
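Both call sites above now reach the unconditional dictionary check in GenerateDictionaryLoad. Conceptually the emitted code is an open-addressed hash probe over the receiver's StringDictionary; a simplified scalar model (linear probing for brevity — the real stub follows the dictionary's own probe sequence and entry layout):

    #include <cstddef>
    #include <stdint.h>
    struct Entry { const void* key; void* value; };  // key: an interned symbol
    static void* Lookup(const Entry* table, uint32_t capacity_mask,
                        const void* symbol, uint32_t hash) {
      for (uint32_t i = 0; i <= capacity_mask; i++) {
        const Entry& e = table[(hash + i) & capacity_mask];
        if (e.key == NULL) return NULL;       // empty slot: property absent
        if (e.key == symbol) return e.value;  // symbols compare by pointer
      }
      return NULL;
    }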
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index ac2895efaf..b91caa8ccc 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -504,13 +504,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@@ -841,9 +834,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+ shl(scratch1, 1);
+ add(Operand(scratch1), Immediate(kObjectAlignmentMask));
and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
@@ -1022,37 +1016,17 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-Object* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Object* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-Object* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Object* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -1357,18 +1331,6 @@ void MacroAssembler::Ret() {
}
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- add(Operand(esp), Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, value);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 160dbcbf16..a41d42e82b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -149,8 +149,6 @@ class MacroAssembler: public Assembler {
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -287,22 +285,12 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub. Generate the code if necessary.
+ // Call a code stub.
void CallStub(CodeStub* stub);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- Object* TryCallStub(CodeStub* stub);
-
- // Tail call a code stub (jump). Generate the code if necessary.
+ // Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- Object* TryTailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -335,12 +323,6 @@ class MacroAssembler: public Assembler {
void Ret();
- void Drop(int element_count);
-
- void Call(Label* target) { call(target); }
-
- void Move(Register target, Handle<Object> value);
-
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 0e836154d3..425c51dcae 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -152,10 +152,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
+template <typename Pushable>
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
- Register name,
+ Pushable name,
JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
@@ -284,10 +285,11 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
+template <class Pushable>
static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- Register name,
+ Pushable name,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
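The Pushable template compiles because MacroAssembler overloads push() for both Register and Operand; the template parameter simply defers overload resolution to each call site. A minimal self-contained analogue:

    #include <cstdio>
    struct Reg { int code; };
    struct Mem { int base, disp; };
    struct Asm {
      void push(Reg r) { std::printf("push r%d\n", r.code); }
      void push(Mem m) { std::printf("push [r%d+%d]\n", m.base, m.disp); }
    };
    template <typename Pushable>
    static void PushName(Asm* a, Pushable name) { a->push(name); }
    int main() {
      Asm a;
      Reg ecx = { 1 };
      Mem slot = { 5, 12 };   // cf. Operand(ebp, (argc + 3) * kPointerSize)
      PushName(&a, ecx);      // register variant
      PushName(&a, slot);     // memory-operand variant
      return 0;
    }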
@@ -493,8 +495,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(const ParameterCount& arguments, Register name)
- : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+ explicit CallInterceptorCompiler(const ParameterCount& arguments)
+ : arguments_(arguments), argc_(arguments.immediate()) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
@@ -525,17 +527,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
}
__ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ __ push(holder); // save the holder
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(
+ masm,
+ receiver,
+ holder,
+ // Under EnterInternalFrame this refers to name.
+ Operand(ebp, (argc_ + 3) * kPointerSize),
+ holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
+ __ pop(receiver); // restore holder
__ LeaveInternalFrame();
__ cmp(eax, Factory::no_interceptor_result_sentinel());
@@ -575,13 +577,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Label* miss_label) {
__ EnterInternalFrame();
- // Save the name_ register across the call.
- __ push(name_);
PushInterceptorArguments(masm,
receiver,
holder,
- name_,
+ Operand(ebp, (argc_ + 3) * kPointerSize),
holder_obj);
ExternalReference ref = ExternalReference(
@@ -592,15 +592,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CEntryStub stub(1);
__ CallStub(&stub);
- // Restore the name_ register.
- __ pop(name_);
__ LeaveInternalFrame();
}
private:
const ParameterCount& arguments_;
int argc_;
- Register name_;
};
@@ -757,7 +754,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
+void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -765,8 +762,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss,
- Failure** failure) {
+ Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
@@ -802,14 +798,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun);
- // Calling the stub may try to allocate (if the code is not already
- // generated). Do not allow the call to perform a garbage
- // collection but instead return the allocation failure object.
- Object* result = masm()->TryCallStub(&stub);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
+ __ CallStub(&stub);
// We need to avoid using eax since that now holds the result.
Register tmp = other.is(eax) ? reg : other;
@@ -817,7 +806,6 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
__ LeaveInternalFrame();
__ ret(0);
- return true;
}
@@ -897,11 +885,6 @@ Object* CallStubCompiler::CompileCallField(Object* object,
int index,
String* name) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -916,7 +899,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, &miss);
+ ebx, ecx, name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -952,11 +935,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
String* name,
CheckType check) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -978,7 +956,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, &miss);
+ ebx, ecx, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -990,15 +968,15 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case STRING_CHECK:
// Check that the object is a two-byte string or a symbol.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- __ cmp(eax, FIRST_NONSTRING_TYPE);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &miss, not_taken);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- eax);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ecx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
@@ -1007,14 +985,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the object is a smi or a heap number.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &fast, taken);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
- eax);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ecx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
}
@@ -1030,15 +1008,15 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
- eax);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ecx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, &miss);
+ ebx, ecx, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -1081,11 +1059,6 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -1098,7 +1071,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(arguments(), ecx);
+ CallInterceptorCompiler compiler(arguments());
CompileLoadInterceptor(&compiler,
this,
masm(),
@@ -1108,7 +1081,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
&lookup,
edx,
ebx,
- edi,
+ ecx,
&miss);
// Restore receiver.
@@ -1147,11 +1120,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -1170,32 +1138,15 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+ CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
- Immediate(Handle<SharedFunctionInfo>(function->shared())));
- __ j(not_equal, &miss, not_taken);
- } else {
- __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, &miss, not_taken);
- }
+ __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+ __ j(not_equal, &miss, not_taken);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@@ -1469,10 +1420,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
- AccessorInfo* callback) {
+ AccessorInfo* callback,
+ String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1481,11 +1432,8 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
- callback, name, &miss, &failure);
- if (!success) return failure;
-
+ GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+ callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1649,11 +1597,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
- callback, name, &miss, &failure);
- if (!success) return failure;
-
+ GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+ callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index ba6488607d..e770cddb15 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -925,17 +925,14 @@ Result VirtualFrame::CallKeyedStoreIC() {
Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
+ // Arguments, receiver, and function name are on top of the frame.
+ // The IC expects them on the stack. It does not drop the function
+ // name slot (but it does drop the rest).
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
+ PrepareForCall(arg_count + 2, arg_count + 1);
return RawCallCodeObject(ic, mode);
}
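For reference, the stack layout this convention implies at the IC call site, reconstructed from the (argc + 1) and (argc + 2) loads in ic-ia32.cc above (esp grows downward):

    // esp[0]               : return address
    // esp[4] .. esp[argc*4]: the argc arguments
    // esp[(argc + 1) * 4]  : receiver        -- dropped by the IC
    // esp[(argc + 2) * 4]  : function name   -- left for the caller to drop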
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index 6c6b4816d7..314ea73b28 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -341,9 +341,9 @@ class VirtualFrame: public ZoneObject {
// of the frame. Key and receiver are not dropped.
Result CallKeyedStoreIC();
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call. The argument count does not
- // include the receiver.
+ // Call call IC. Arguments, receiver, and function name are found
+ // on top of the frame. Function name slot is not dropped. The
+ // argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 2661a10241..2779356c0e 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -409,7 +409,7 @@ Object* CallIC::LoadFunction(State state,
if (!lookup.IsValid()) {
// If the object does not have the requested property, check which
// exception we need to throw.
- if (IsContextual(object)) {
+ if (is_contextual()) {
return ReferenceError("not_defined", name);
}
return TypeError("undefined_method", object, name);
@@ -428,7 +428,7 @@ Object* CallIC::LoadFunction(State state,
// If the object does not have the requested property, check which
// exception we need to throw.
if (attr == ABSENT) {
- if (IsContextual(object)) {
+ if (is_contextual()) {
return ReferenceError("not_defined", name);
}
return TypeError("undefined_method", object, name);
@@ -628,7 +628,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
- if (FLAG_strict || IsContextual(object)) {
+ if (FLAG_strict || is_contextual()) {
return ReferenceError("not_defined", name);
}
LOG(SuspectReadEvent(*name, *object));
@@ -671,7 +671,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
if (result->IsFailure()) return result;
// If the property is not present, check if we need to throw an
// exception.
- if (attr == ABSENT && IsContextual(object)) {
+ if (attr == ABSENT && is_contextual()) {
return ReferenceError("not_defined", name);
}
return result;
@@ -843,7 +843,7 @@ Object* KeyedLoadIC::Load(State state,
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
- if (FLAG_strict || IsContextual(object)) {
+ if (FLAG_strict || is_contextual()) {
return ReferenceError("not_defined", name);
}
}
@@ -859,7 +859,7 @@ Object* KeyedLoadIC::Load(State state,
if (result->IsFailure()) return result;
// If the property is not present, check if we need to throw an
// exception.
- if (attr == ABSENT && IsContextual(object)) {
+ if (attr == ABSENT && is_contextual()) {
return ReferenceError("not_defined", name);
}
return result;
@@ -1292,6 +1292,16 @@ Object* CallIC_Miss(Arguments args) {
}
+void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
// Used from ic_<arch>.cc.
Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index f53c6ddf00..870908838b 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -33,11 +33,6 @@
namespace v8 {
namespace internal {
-// Flag indicating whether an IC stub needs to check that a backing
-// store is in dictionary case.
-enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
-
-
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
@@ -104,16 +99,7 @@ class IC {
// Returns if this IC is for contextual (no explicit receiver)
// access to properties.
- bool IsContextual(Handle<Object> receiver) {
- if (receiver->IsGlobalObject()) {
- return SlowIsContextual();
- } else {
- ASSERT(!SlowIsContextual());
- return false;
- }
- }
-
- bool SlowIsContextual() {
+ bool is_contextual() {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
@@ -189,14 +175,16 @@ class CallIC: public IC {
// Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
- }
+ static void GenerateInitialize(MacroAssembler* masm, int argc);
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
private:
+ static void Generate(MacroAssembler* masm,
+ int argc,
+ const ExternalReference& f);
+
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 0fe4328234..63a6d6eb78 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -77,13 +77,8 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2.h"
-#include "arm/assembler-thumb2-inl.h"
-#else
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
-#endif
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
#else
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 7a5353745e..81819b7f67 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -155,8 +155,6 @@ void MarkCompactCollector::Finish() {
// objects (empty string, illegal builtin).
StubCache::Clear();
- ExternalStringTable::CleanUp();
-
// If we've just compacted old space there's no reason to check the
// fragmentation limit. Just return.
if (HasCompacted()) return;
@@ -371,18 +369,41 @@ class RootMarkingVisitor : public ObjectVisitor {
class SymbolTableCleaner : public ObjectVisitor {
public:
SymbolTableCleaner() : pointers_removed_(0) { }
-
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
// Check if the symbol being pruned is an external symbol. We need to
// delete the associated external data as this symbol is going away.
+ // Since the object is not marked we can access its map word safely
+ // without having to worry about marking bits in the object header.
+ Map* map = HeapObject::cast(*p)->map();
// Since no objects have yet been moved we can safely access the map of
// the object.
- if ((*p)->IsExternalString()) {
- Heap::FinalizeExternalString(String::cast(*p));
+ uint32_t type = map->instance_type();
+ bool is_external = (type & kStringRepresentationMask) ==
+ kExternalStringTag;
+ if (is_external) {
+ bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
+ byte* resource_addr = reinterpret_cast<byte*>(*p) +
+ ExternalString::kResourceOffset -
+ kHeapObjectTag;
+ if (is_two_byte) {
+ v8::String::ExternalStringResource** resource =
+ reinterpret_cast<v8::String::ExternalStringResource**>
+ (resource_addr);
+ delete *resource;
+ // Clear the resource pointer in the symbol.
+ *resource = NULL;
+ } else {
+ v8::String::ExternalAsciiStringResource** resource =
+ reinterpret_cast<v8::String::ExternalAsciiStringResource**>
+ (resource_addr);
+ delete *resource;
+ // Clear the resource pointer in the symbol.
+ *resource = NULL;
+ }
}
// Set the entry to null_value (as deleted).
*p = Heap::raw_unchecked_null_value();
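The resource_addr computation above is the usual untagging arithmetic: V8 heap pointers carry a low tag (kHeapObjectTag is 1), so a raw field address is the tagged pointer plus the field offset minus the tag. As a one-line helper:

    #include <stdint.h>
    static const intptr_t kHeapObjectTag = 1;  // V8's heap-pointer tag
    static inline uint8_t* FieldAddress(void* tagged, int byte_offset) {
      return reinterpret_cast<uint8_t*>(tagged) + byte_offset - kHeapObjectTag;
    }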
@@ -525,7 +546,34 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
}
+class SymbolMarkingVisitor : public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ MarkingVisitor marker;
+ for (Object** p = start; p < end; p++) {
+ if (!(*p)->IsHeapObject()) continue;
+
+ HeapObject* object = HeapObject::cast(*p);
+ // If the object is marked, we have marked or are in the process
+ // of marking subparts.
+ if (object->IsMarked()) continue;
+
+ // The object is unmarked, so we do not need to unmark it to use its
+ // map.
+ Map* map = object->map();
+ object->IterateBody(map->instance_type(),
+ object->SizeFromMap(map),
+ &marker);
+ }
+ }
+};
+
+
void MarkCompactCollector::MarkSymbolTable() {
+ // Objects reachable from symbols are marked as live so as to ensure
+ // that if the symbol itself remains alive after GC for any reason,
+ // and if it is a cons string backed by an external string (even indirectly),
+ // then the external string does not receive a weak reference callback.
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
@@ -533,6 +581,11 @@ void MarkCompactCollector::MarkSymbolTable() {
MarkingVisitor marker;
symbol_table->IteratePrefix(&marker);
ProcessMarkingStack(&marker);
+ // Mark subparts of the symbols but not the symbols themselves
+ // (unless reachable from another symbol).
+ SymbolMarkingVisitor symbol_marker;
+ symbol_table->IterateElements(&symbol_marker);
+ ProcessMarkingStack(&marker);
}
@@ -721,8 +774,6 @@ void MarkCompactCollector::MarkLiveObjects() {
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
- ExternalStringTable::Iterate(&v);
- ExternalStringTable::CleanUp();
// Remove object groups after marking phase.
GlobalHandles::RemoveObjectGroups();
@@ -836,8 +887,11 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
-// The exact encoding is described in the comments for class MapWord in
-// objects.h.
+//  31             21 20              10 9               0
+// +-----------------+------------------+-----------------+
+// |forwarding offset|page offset of map|page index of map|
+// +-----------------+------------------+-----------------+
+//       11 bits           11 bits            10 bits
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 07f729505e..e3d266e4be 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -29,6 +29,7 @@
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.
+const $Infinity = global.Infinity;
const $floor = MathFloor;
const $random = MathRandom;
const $abs = MathAbs;
@@ -117,40 +118,26 @@ function MathLog(x) {
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
+ var r = -$Infinity;
var length = %_ArgumentsLength();
- if (length == 0) {
- return -1/0; // Compiler constant-folds this to -Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = ToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
- var n = %_Arguments(i);
- if (!IS_NUMBER(n)) n = ToNumber(n);
+ for (var i = 0; i < length; i++) {
+ var n = ToNumber(%_Arguments(i));
if (NUMBER_IS_NAN(n)) return n;
- // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
- // a Smi or heap number.
- if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+ // Make sure +0 is considered greater than -0.
+ if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
}
return r;
}
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
+ var r = $Infinity;
var length = %_ArgumentsLength();
- if (length == 0) {
- return 1/0; // Compiler constant-folds this to Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = ToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
- var n = %_Arguments(i);
- if (!IS_NUMBER(n)) n = ToNumber(n);
+ for (var i = 0; i < length; i++) {
+ var n = ToNumber(%_Arguments(i));
if (NUMBER_IS_NAN(n)) return n;
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
- // Smi or a heap number.
- if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+ // Make sure -0 is considered less than +0.
+ if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
}
return r;
}
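The dropped !%_IsSmi(r) && 1 / r < 0 clause relied on the IEEE 754 fact that the two zeros compare equal but divide differently, so only division (or a sign-bit test) can tell them apart. A minimal C++ illustration:

    #include <cstdio>
    int main() {
      double pz = 0.0, nz = -0.0;
      std::printf("pz == nz: %d\n", pz == nz);          // 1: equality can't tell them apart
      std::printf("1/nz < 0: %d\n", (1.0 / nz) < 0.0);  // 1: division recovers the sign
      return 0;
    }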
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index bdcbf918ec..1e5053d7eb 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -157,11 +157,6 @@ function FormatMessage(message) {
instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
null_to_object: "Cannot convert null to object",
reduce_no_initial: "Reduce of empty array with no initial value",
- getter_must_be_callable: "Getter must be a function: %0",
- setter_must_be_callable: "Setter must be a function: %0",
- value_and_accessor: "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
- proto_object_or_null: "Object prototype may only be an Object or null",
- property_desc_object: "Property description must be an object: %0",
// RangeError
invalid_array_length: "Invalid array length",
stack_overflow: "Maximum call stack size exceeded",
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 45690381fa..8514a412c1 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -952,14 +952,14 @@ MapWord MapWord::EncodeAddress(Address map_address, int offset) {
// exceed the object area size of a page.
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
- uintptr_t compact_offset = offset >> kObjectAlignmentBits;
+ int compact_offset = offset >> kObjectAlignmentBits;
ASSERT(compact_offset < (1 << kForwardingOffsetBits));
Page* map_page = Page::FromAddress(map_address);
ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
- uintptr_t map_page_offset =
- map_page->Offset(map_address) >> kMapAlignmentBits;
+ int map_page_offset =
+ map_page->Offset(map_address) >> kObjectAlignmentBits;
uintptr_t encoding =
(compact_offset << kForwardingOffsetShift) |
@@ -975,8 +975,8 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) {
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset = static_cast<int>(
- ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
- kMapAlignmentBits);
+ ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
+ << kObjectAlignmentBits);
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
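A round-trip sketch of the encoding above, using the 10/11/11 field widths this revert restores in objects.h (kObjectAlignmentBits is assumed to be 2, i.e. 4-byte alignment on ia32):

    #include <cassert>
    #include <stdint.h>
    int main() {
      const int kAlignBits = 2;                       // assumed object alignment
      const int kOffsetShift = 10, kForwardShift = 21;
      uint32_t page_index = 7, map_page_offset = 0x1F4, forwarding = 0x2A8;
      uint32_t encoding = ((forwarding >> kAlignBits) << kForwardShift) |
                          ((map_page_offset >> kAlignBits) << kOffsetShift) |
                          page_index;
      assert((encoding & 0x3FF) == page_index);
      assert((((encoding >> kOffsetShift) & 0x7FF) << kAlignBits) == map_page_offset);
      assert((((encoding >> kForwardShift) & 0x7FF) << kAlignBits) == forwarding);
      return 0;
    }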
@@ -1499,7 +1499,7 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- // Make sure none of the elements in desc are in new space.
+ // Make sure none of the elements in desc are in new space.
ASSERT(!Heap::InNewSpace(desc->GetKey()));
ASSERT(!Heap::InNewSpace(desc->GetValue()));
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 52f1f9af61..0f8dca398d 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1351,8 +1351,6 @@ Object* JSObject::AddFastProperty(String* name,
Object* JSObject::AddConstantFunctionProperty(String* name,
JSFunction* function,
PropertyAttributes attributes) {
- ASSERT(!Heap::InNewSpace(function));
-
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
Object* new_descriptors =
@@ -1439,7 +1437,7 @@ Object* JSObject::AddProperty(String* name,
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
+ if (value->IsJSFunction()) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@@ -3256,8 +3254,7 @@ Object* DescriptorArray::Allocate(int number_of_descriptors) {
return Heap::empty_descriptor_array();
}
// Allocate the array of keys.
- Object* array =
- Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+ Object* array = Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
if (array->IsFailure()) return array;
// Do not use DescriptorArray::cast on incomplete object.
FixedArray* result = FixedArray::cast(array);
@@ -7965,10 +7962,7 @@ Object* StringDictionary::TransformPropertiesToFastFor(
PropertyType type = DetailsAt(i).type();
ASSERT(type != FIELD);
instance_descriptor_length++;
- if (type == NORMAL &&
- (!value->IsJSFunction() || Heap::InNewSpace(value))) {
- number_of_fields += 1;
- }
+ if (type == NORMAL && !value->IsJSFunction()) number_of_fields += 1;
}
}
@@ -7999,7 +7993,7 @@ Object* StringDictionary::TransformPropertiesToFastFor(
PropertyDetails details = DetailsAt(i);
PropertyType type = details.type();
- if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
+ if (value->IsJSFunction()) {
ConstantFunctionDescriptor d(String::cast(key),
JSFunction::cast(value),
details.attributes(),
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 03e2ca19b9..671978ab7f 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -892,25 +892,15 @@ class MapWord BASE_EMBEDDED {
static const int kOverflowBit = 1; // overflow bit
static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
- // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
- // used.
+ // Forwarding pointers and map pointer encoding
+ //  31             21 20              10 9               0
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
- // ^ ^ ^
- // | | |
- // | | kMapPageIndexBits
- // | kMapPageOffsetBits
- // kForwardingOffsetBits
- static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
- static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
-#ifdef V8_HOST_ARCH_64_BIT
- static const int kMapPageIndexBits = 16;
-#else
- // Use all the 32-bits to encode on a 32-bit platform.
- static const int kMapPageIndexBits =
- 32 - (kMapPageOffsetBits + kForwardingOffsetBits);
-#endif
+ //       11 bits           11 bits            10 bits
+ static const int kMapPageIndexBits = 10;
+ static const int kMapPageOffsetBits = 11;
+ static const int kForwardingOffsetBits = 11;
static const int kMapPageIndexShift = 0;
static const int kMapPageOffsetShift =
@@ -918,12 +908,16 @@ class MapWord BASE_EMBEDDED {
static const int kForwardingOffsetShift =
kMapPageOffsetShift + kMapPageOffsetBits;
- // Bit masks covering the different parts the encoding.
- static const uintptr_t kMapPageIndexMask =
+ // 0x000003FF
+ static const uint32_t kMapPageIndexMask =
(1 << kMapPageOffsetShift) - 1;
- static const uintptr_t kMapPageOffsetMask =
+
+ // 0x001FFC00
+ static const uint32_t kMapPageOffsetMask =
((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
- static const uintptr_t kForwardingOffsetMask =
+
+ // 0xFFE00000
+ static const uint32_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
private:
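The hexadecimal comments above follow directly from the restored widths and shifts; a quick check:

    #include <cstdio>
    #include <stdint.h>
    int main() {
      const uint32_t index_mask  = (1u << 10) - 1;                  // 0x000003FF
      const uint32_t offset_mask = ((1u << 21) - 1) & ~index_mask;  // 0x001FFC00
      const uint32_t fwd_mask    = ~(index_mask | offset_mask);     // 0xFFE00000
      std::printf("%08X %08X %08X\n", index_mask, offset_mask, fwd_mask);
      return 0;
    }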
@@ -1668,7 +1662,6 @@ class DescriptorArray: public FixedArray {
public:
// Is this the singleton empty_descriptor_array?
inline bool IsEmpty();
-
// Returns the number of descriptors in the array.
int number_of_descriptors() {
return IsEmpty() ? 0 : length() - kFirstIndex;
@@ -1808,14 +1801,12 @@ class DescriptorArray: public FixedArray {
static int ToKeyIndex(int descriptor_number) {
return descriptor_number + kFirstIndex;
}
-
- static int ToDetailsIndex(int descriptor_number) {
- return (descriptor_number << 1) + 1;
- }
-
static int ToValueIndex(int descriptor_number) {
return descriptor_number << 1;
}
+ static int ToDetailsIndex(int descriptor_number) {
+ return (descriptor_number << 1) + 1;
+ }
bool is_null_descriptor(int descriptor_number) {
return PropertyDetails(GetDetails(descriptor_number)).type() ==
@@ -2847,6 +2838,7 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, FixedArray)
+ // Returns a copy of the map.
Object* CopyDropDescriptors();
// Returns a copy of the map, with all transitions dropped from the
@@ -2914,8 +2906,7 @@ class Map: public HeapObject {
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kPadStart = kCodeCacheOffset + kPointerSize;
- static const int kSize = MAP_SIZE_ALIGN(kPadStart);
+ static const int kSize = kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index a3bc6dab22..c37078ce64 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -2657,9 +2657,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expression* cond = NULL;
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
- if (cond && cond->AsCompareOperation()) {
- cond->AsCompareOperation()->set_is_for_loop_condition();
- }
}
Expect(Token::SEMICOLON, CHECK_OK);
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 9ef7270265..87da026426 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -594,11 +594,11 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->inc_indent();
}
- explicit IndentedScope(const char* txt, StaticType* type = NULL) {
+ explicit IndentedScope(const char* txt, SmiAnalysis* type = NULL) {
ast_printer_->PrintIndented(txt);
if ((type != NULL) && (type->IsKnown())) {
ast_printer_->Print(" (type = ");
- ast_printer_->Print(StaticType::Type2String(type));
+ ast_printer_->Print(SmiAnalysis::Type2String(type));
ast_printer_->Print(")");
}
ast_printer_->Print("\n");
@@ -657,7 +657,7 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type) {
+ SmiAnalysis* type) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
@@ -665,7 +665,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
if (type->IsKnown()) {
OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
Variable::Mode2String(var->mode()),
- StaticType::Type2String(type));
+ SmiAnalysis::Type2String(type));
} else {
OS::SNPrintF(buf, "%s (mode = %s)", info,
Variable::Mode2String(var->mode()));
@@ -1072,7 +1072,7 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
OS::SNPrintF(buf, "%s %s (type = %s)",
(node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()),
- StaticType::Type2String(node->type()));
+ SmiAnalysis::Type2String(node->type()));
} else {
OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index dfff49a45a..f885cb31f2 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -102,7 +102,7 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type);
+ SmiAnalysis* type);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index b05cfae309..de1b95b99c 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -367,7 +367,7 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
if (proxy != NULL) {
Variable* var = proxy->AsVariable();
if (var != NULL) {
- StaticType* var_type = var->type();
+ SmiAnalysis* var_type = var->type();
if (var_type->IsUnknown()) {
var_type->CopyFrom(node->type());
} else if (var_type->IsLikelySmi()) {
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 834983344d..65dfd1326d 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -398,82 +398,6 @@ static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
}
-static Object* Runtime_CreateObjectLiteral(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
-}
-
-
-static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return Heap::CopyJSObject(JSObject::cast(*boilerplate));
-}
-
-
-static Object* Runtime_CreateArrayLiteral(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
-}
-
-
-static Object* Runtime_CreateArrayLiteralShallow(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return Heap::CopyJSObject(JSObject::cast(*boilerplate));
-}
-
-
static Object* Runtime_CreateCatchExtensionObject(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[0]);
@@ -720,7 +644,7 @@ static Object* Runtime_DeclareGlobals(Arguments args) {
// Copy the function and update its context. Use it as value.
Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
Handle<JSFunction> function =
- Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
value = function;
}
@@ -795,15 +719,12 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
if (*initial_value != NULL) {
if (index >= 0) {
// The variable or constant context slot should always be in
- // the function context or the arguments object.
- if (holder->IsContext()) {
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
- }
- } else {
- Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
+ // the function context; not in any outer context nor in the
+ // arguments object.
+ ASSERT(holder.is_identical_to(context));
+ if (((attributes & READ_ONLY) == 0) ||
+ context->get(index)->IsTheHole()) {
+ context->set(index, *initial_value);
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
@@ -4502,11 +4423,8 @@ static Object* Runtime_NewClosure(Arguments args) {
CONVERT_ARG_CHECKED(Context, context, 0);
CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
- PretenureFlag pretenure = (context->global_context() == *context)
- ? TENURED // Allocate global closures in old space.
- : NOT_TENURED; // Allocate local closures in new space.
Handle<JSFunction> result =
- Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
return *result;
}
@@ -5222,7 +5140,7 @@ static Object* Runtime_CompileString(Arguments args) {
validate);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
- Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
return *fun;
}
@@ -5250,7 +5168,7 @@ static Object* CompileDirectEval(Handle<String> source) {
Compiler::DONT_VALIDATE_JSON);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
- Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
return *fun;
}
@@ -7887,8 +7805,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
HandleScope scope;
- limit = Max(limit, 0); // Ensure that limit is not negative.
- int initial_size = Min(limit, 10);
+ int initial_size = limit < 10 ? limit : 10;
Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
StackFrameIterator iter;
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index f13c42433d..858023317f 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -223,10 +223,6 @@ namespace internal {
F(CreateObjectLiteralBoilerplate, 3, 1) \
F(CloneLiteralBoilerplate, 1, 1) \
F(CloneShallowLiteralBoilerplate, 1, 1) \
- F(CreateObjectLiteral, 3, 1) \
- F(CreateObjectLiteralShallow, 3, 1) \
- F(CreateArrayLiteral, 3, 1) \
- F(CreateArrayLiteralShallow, 3, 1) \
\
/* Catch context extension objects */ \
F(CreateCatchExtensionObject, 2, 1) \
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 1b65fe51d4..105749a759 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -122,12 +122,6 @@ function COMPARE(x, ncr) {
return %StringCompare(this, x);
}
- // If one of the operands is undefined, it will convert to NaN and
- // thus the result should be as if one of the operands was NaN.
- if (IS_UNDEFINED(this) || IS_UNDEFINED(x)) {
- return ncr;
- }
-
// Default implementation.
var a = %ToPrimitive(this, NUMBER_HINT);
var b = %ToPrimitive(x, NUMBER_HINT);
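The fast path removed above hard-codes the rule the default path reaches anyway: undefined converts to NaN, and every ordered comparison against NaN is false, so COMPARE answers with ncr. The same NaN behaviour shown in C++:

    #include <cstdio>
    #include <limits>
    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      std::printf("%d %d %d\n", nan < 1.0, nan > 1.0, nan == nan);  // 0 0 0
      return 0;
    }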
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index a47d3730a3..7da06cdbc0 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -189,7 +189,8 @@ void Scope::Initialize(bool inside_with) {
variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
- receiver_ = var;
+ receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
+ receiver_->BindTo(var);
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 9b506d989e..fc627df619 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -206,13 +206,8 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Accessors.
- // A new variable proxy corresponding to the (function) receiver.
- VariableProxy* receiver() const {
- VariableProxy* proxy =
- new VariableProxy(Factory::this_symbol(), true, false);
- proxy->BindTo(receiver_);
- return proxy;
- }
+ // The variable corresponding to the (function) receiver.
+ VariableProxy* receiver() const { return receiver_; }
// The variable holding the function literal for named function
// literals, or NULL.
@@ -319,7 +314,7 @@ class Scope: public ZoneObject {
// Declarations.
ZoneList<Declaration*> decls_;
// Convenience variable.
- Variable* receiver_;
+ VariableProxy* receiver_;
// Function variable, if any; function scopes only.
Variable* function_;
// Convenience variable; function scopes only.
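A sketch, with stand-in types, of the accessor change in the two scope hunks above: before the revert, receiver() allocated a fresh VariableProxy on every call; afterwards Initialize() builds one proxy, binds it to the 'this' variable, and receiver() hands out that same pointer each time.

  struct VariableProxy { /* bound to a Variable elsewhere */ };

  struct ScopeLike {
    VariableProxy* receiver_;  // created and bound once, in Initialize()
    VariableProxy* receiver() const { return receiver_; }  // no per-call allocation
  };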
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index fe042524dd..899e2e7a59 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -55,8 +55,9 @@ class SerializationAddressMapper {
static int MappedTo(HeapObject* obj) {
ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
+ return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
+ Hash(obj),
+ false)->value);
}
static void Map(HeapObject* obj, int to) {
@@ -80,7 +81,7 @@ class SerializationAddressMapper {
}
static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ return reinterpret_cast<intptr_t>(obj->address());
}
static void* Key(HeapObject* obj) {
@@ -484,15 +485,6 @@ void ExternalReferenceTable::PopulateTable() {
21,
"NativeRegExpMacroAssembler::GrowStack()");
#endif
- // Keyed lookup cache.
- Add(ExternalReference::keyed_lookup_cache_keys().address(),
- UNCLASSIFIED,
- 22,
- "KeyedLookupCache::keys()");
- Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
- UNCLASSIFIED,
- 23,
- "KeyedLookupCache::field_offsets()");
}
@@ -632,7 +624,7 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
return HeapObject::FromAddress(pages_[space][0] + offset);
}
ASSERT(SpaceIsPaged(space));
- int page_of_pointee = offset >> kPageSizeBits;
+ int page_of_pointee = offset >> Page::kPageSizeBits;
Address object_address = pages_[space][page_of_pointee] +
(offset & Page::kPageAlignmentMask);
return HeapObject::FromAddress(object_address);
@@ -972,8 +964,8 @@ void Serializer::SerializeObject(
int offset = CurrentAllocationAddress(space) - address;
bool from_start = true;
if (SpaceIsPaged(space)) {
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
+ if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
+ (address >> Page::kPageSizeBits)) {
from_start = false;
address = offset;
}
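The two serializer hunks above drop explicit casts: on a 64-bit target, returning a reinterpret_cast<intptr_t> value from a function declared to return int (or uint32_t) truncates implicitly, which the removed static_cast made deliberate and warning-free. The explicit form in isolation:

  #include <cstdint>

  // Hash a heap address down to 32 bits; the truncation is intentional and visible.
  uint32_t HashAddress(const void* address) {
    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(address));
  }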
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index f4d0cb0d86..f3b6b9f639 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -398,7 +398,7 @@ static int PagesInChunk(Address start, size_t size) {
// start+size. Page::kPageSize is a power of two so we can divide by
// shifting.
return static_cast<int>((RoundDown(start + size, Page::kPageSize)
- - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
+ - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
}
@@ -412,7 +412,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
if (size_ + static_cast<int>(chunk_size) > capacity_) {
// Request as many pages as we can.
chunk_size = capacity_ - size_;
- requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
+ requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index faeafafcea..75b992ffee 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -65,23 +65,20 @@ namespace internal {
// Some assertion macros used in the debugging mode.
-#define ASSERT_PAGE_ALIGNED(address) \
+#define ASSERT_PAGE_ALIGNED(address) \
ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
-#define ASSERT_OBJECT_ALIGNED(address) \
+#define ASSERT_OBJECT_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-#define ASSERT_MAP_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_SIZE(size) \
+#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
-#define ASSERT_PAGE_OFFSET(offset) \
- ASSERT((Page::kObjectStartOffset <= offset) \
+#define ASSERT_PAGE_OFFSET(offset) \
+ ASSERT((Page::kObjectStartOffset <= offset) \
&& (offset <= Page::kPageSize))
-#define ASSERT_MAP_PAGE_INDEX(index) \
+#define ASSERT_MAP_PAGE_INDEX(index) \
ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
@@ -109,8 +106,11 @@ class AllocationInfo;
// For this reason we add an offset to get room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The exact encoding is described in the comments for
-// class MapWord in objects.h.
+// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
+// 8K) in total. Because a map pointer is aligned to the pointer size (4
+// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
+// page index + 11 for the offset in the page) are required to encode a map
+// pointer.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -212,6 +212,9 @@ class Page {
static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif
+ // 8K bytes per page.
+ static const int kPageSizeBits = 13;
+
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -511,7 +514,7 @@ class MemoryAllocator : public AllStatic {
#endif
// Due to encoding limitation, we can only have 8K chunks.
- static const int kMaxNofChunks = 1 << kPageSizeBits;
+ static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
// If a chunk has at least 16 pages, the maximum heap size is about
// 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
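A worked sketch of the arithmetic in the restored map-pointer comment above: with 8K pages (kPageSizeBits == 13) and 4-byte-aligned map pointers, an in-page offset fits in 11 bits and up to 1024 page indices fit in 10 bits, so the pair packs into 21 bits.

  #include <cassert>
  #include <cstdint>

  constexpr int kPageSizeBits = 13;  // 8K bytes per page
  constexpr int kOffsetBits = 11;    // 8K divided by 4-byte alignment

  uint32_t EncodeMapWord(uint32_t page_index, uint32_t byte_offset) {
    assert(page_index < 1024u && byte_offset < (1u << kPageSizeBits));
    return (page_index << kOffsetBits) | (byte_offset >> 2);  // 21 bits total
  }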
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 9ab83beba1..51d9ddb8fa 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -120,7 +120,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
- code = compiler.CompileLoadCallback(name, receiver, holder, callback);
+ code = compiler.CompileLoadCallback(receiver, holder, callback, name);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -831,7 +831,7 @@ static Object* ThrowReferenceError(String* name) {
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsContextual()) return Heap::undefined_value();
+ if (!ic.is_contextual()) return Heap::undefined_value();
// Throw a reference error.
HandleScope scope;
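The ComputeLoadCallback context above follows a compile-once pattern: probe the map's code cache, compile the stub only on a miss, then publish the result for the next lookup. A stand-in sketch with ordinary containers rather than V8's types:

  #include <map>
  #include <string>

  std::map<std::string, int> code_cache;  // stub name -> compiled stub id

  int GetOrCompile(const std::string& name) {
    auto it = code_cache.find(name);
    if (it != code_cache.end()) return it->second;       // cache hit
    int code = static_cast<int>(code_cache.size()) + 1;  // "compile" a new stub
    code_cache[name] = code;                             // publish for reuse
    return code;
  }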
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 2418c1f766..788c5324fa 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -405,7 +405,7 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);
- bool GenerateLoadCallback(JSObject* object,
+ void GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -413,8 +413,7 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss,
- Failure** failure);
+ Label* miss);
void GenerateLoadConstant(JSObject* object,
JSObject* holder,
@@ -448,10 +447,10 @@ class LoadStubCompiler: public StubCompiler {
JSObject* holder,
int index,
String* name);
- Object* CompileLoadCallback(String* name,
- JSObject* object,
+ Object* CompileLoadCallback(JSObject* object,
JSObject* holder,
- AccessorInfo* callback);
+ AccessorInfo* callback,
+ String* name);
Object* CompileLoadConstant(JSObject* object,
JSObject* holder,
Object* value,
diff --git a/deps/v8/src/token.cc b/deps/v8/src/token.cc
index 8cee99bb8b..0a4ad4c1ad 100644
--- a/deps/v8/src/token.cc
+++ b/deps/v8/src/token.cc
@@ -32,11 +32,13 @@
namespace v8 {
namespace internal {
+#ifdef DEBUG
#define T(name, string, precedence) #name,
const char* Token::name_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
+#endif
#define T(name, string, precedence) string,
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 2a228d67cf..a60704cd07 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -66,9 +66,8 @@ namespace internal {
T(DEC, "--", 0) \
\
/* Assignment operators. */ \
- /* IsAssignmentOp() and Assignment::is_compound() relies on */ \
- /* this block of enum values being contiguous and sorted in the */ \
- /* same order! */ \
+ /* IsAssignmentOp() relies on this block of enum values */ \
+ /* being contiguous and sorted in the same order! */ \
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
@@ -212,12 +211,14 @@ class Token {
};
#undef T
+#ifdef DEBUG
// Returns a string corresponding to the C++ token name
// (e.g. "LT" for the token LT).
static const char* Name(Value tok) {
ASSERT(0 <= tok && tok < NUM_TOKENS);
return name_[tok];
}
+#endif
// Predicates
static bool IsAssignmentOp(Value tok) {
@@ -260,7 +261,9 @@ class Token {
}
private:
+#ifdef DEBUG
static const char* name_[NUM_TOKENS];
+#endif
static const char* string_[NUM_TOKENS];
static int8_t precedence_[NUM_TOKENS];
};
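The token.cc and token.h hunks above gate the per-token name table behind #ifdef DEBUG, so release builds carry neither the string table nor the Name() accessor. The pattern in miniature:

  #ifdef DEBUG
  static const char* kTokenNames[] = { "ADD", "SUB" };
  const char* TokenName(int tok) { return kTokenNames[tok]; }  // debug builds only
  #endif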
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 158824d0e2..d6f53fab13 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -74,6 +74,8 @@ namespace internal {
SC(objs_since_last_full, V8.ObjsSinceLastFull) \
SC(symbol_table_capacity, V8.SymbolTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
+ /* Current amount of memory in external string buffers. */ \
+ SC(total_external_string_memory, V8.TotalExternalStringMemory) \
SC(script_wrappers, V8.ScriptWrappers) \
SC(call_initialize_stubs, V8.CallInitializeStubs) \
SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index a664095580..8f9adcbb8d 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -41,7 +41,6 @@
const $isNaN = GlobalIsNaN;
const $isFinite = GlobalIsFinite;
-
// ----------------------------------------------------------------------------
@@ -88,7 +87,7 @@ function GlobalIsFinite(number) {
// ECMA-262 - 15.1.2.2
function GlobalParseInt(string, radix) {
- if (IS_UNDEFINED(radix)) {
+ if (radix === void 0) {
// Some people use parseInt instead of Math.floor. This
// optimization makes parseInt on a Smi 12 times faster (60ns
// vs 800ns). The following optimization makes parseInt on a
@@ -281,207 +280,6 @@ function ObjectKeys(obj) {
}
-// ES5 8.10.1.
-function IsAccessorDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasGetter_ || desc.hasSetter_;
-}
-
-
-// ES5 8.10.2.
-function IsDataDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasValue_ || desc.hasWritable_;
-}
-
-
-// ES5 8.10.3.
-function IsGenericDescriptor(desc) {
- return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
-}
-
-
-function IsInconsistentDescriptor(desc) {
- return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
-}
-
-
-// ES5 8.10.5.
-function ToPropertyDescriptor(obj) {
- if (!IS_OBJECT(obj)) {
- throw MakeTypeError("property_desc_object", [obj]);
- }
- var desc = new PropertyDescriptor();
-
- if ("enumerable" in obj) {
- desc.setEnumerable(ToBoolean(obj.enumerable));
- }
-
-
- if ("configurable" in obj) {
- desc.setConfigurable(ToBoolean(obj.configurable));
- }
-
- if ("value" in obj) {
- desc.setValue(obj.value);
- }
-
- if ("writable" in obj) {
- desc.setWritable(ToBoolean(obj.writable));
- }
-
- if ("get" in obj) {
- var get = obj.get;
- if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
- throw MakeTypeError("getter_must_be_callable", [get]);
- }
- desc.setGet(get);
- }
-
- if ("set" in obj) {
- var set = obj.set;
- if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
- throw MakeTypeError("setter_must_be_callable", [set]);
- }
- desc.setSet(set);
- }
-
- if (IsInconsistentDescriptor(desc)) {
- throw MakeTypeError("value_and_accessor", [obj]);
- }
- return desc;
-}
-
-
-function PropertyDescriptor() {
- // Initialize here so they are all in-object and have the same map.
- // Default values from ES5 8.6.1.
- this.value_ = void 0;
- this.hasValue_ = false;
- this.writable_ = false;
- this.hasWritable_ = false;
- this.enumerable_ = false;
- this.configurable_ = false;
- this.get_ = void 0;
- this.hasGetter_ = false;
- this.set_ = void 0;
- this.hasSetter_ = false;
-}
-
-
-PropertyDescriptor.prototype.setValue = function(value) {
- this.value_ = value;
- this.hasValue_ = true;
-}
-
-
-PropertyDescriptor.prototype.getValue = function() {
- return this.value_;
-}
-
-
-PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
- this.enumerable_ = enumerable;
-}
-
-
-PropertyDescriptor.prototype.isEnumerable = function () {
- return this.enumerable_;
-}
-
-
-PropertyDescriptor.prototype.setWritable = function(writable) {
- this.writable_ = writable;
- this.hasWritable_ = true;
-}
-
-
-PropertyDescriptor.prototype.isWritable = function() {
- return this.writable_;
-}
-
-
-PropertyDescriptor.prototype.setConfigurable = function(configurable) {
- this.configurable_ = configurable;
-}
-
-
-PropertyDescriptor.prototype.isConfigurable = function() {
- return this.configurable_;
-}
-
-
-PropertyDescriptor.prototype.setGet = function(get) {
- this.get_ = get;
- this.hasGetter_ = true;
-}
-
-
-PropertyDescriptor.prototype.getGet = function() {
- return this.get_;
-}
-
-
-PropertyDescriptor.prototype.setSet = function(set) {
- this.set_ = set;
- this.hasSetter_ = true;
-}
-
-
-PropertyDescriptor.prototype.getSet = function() {
- return this.set_;
-}
-
-
-// ES5 8.12.9. This version cannot cope with the property p already
-// being present on obj.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
- if (IsDataDescriptor(desc)) {
- flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
- %SetProperty(obj, p, desc.getValue(), flag);
- } else {
- if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
- if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.5.
-function ObjectCreate(proto, properties) {
- if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
- throw MakeTypeError("proto_object_or_null", [proto]);
- }
- var obj = new $Object();
- obj.__proto__ = proto;
- if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
- return obj;
-}
-
-
-// ES5 section 15.2.3.7. This version cannot cope with the properties already
-// being present on obj. Therefore it is not exposed as
-// Object.defineProperties yet.
-function ObjectDefineProperties(obj, properties) {
- var props = ToObject(properties);
- var key_values = [];
- for (var key in props) {
- if (%HasLocalProperty(props, key)) {
- key_values.push(key);
- var value = props[key];
- var desc = ToPropertyDescriptor(value);
- key_values.push(desc);
- }
- }
- for (var i = 0; i < key_values.length; i += 2) {
- var key = key_values[i];
- var desc = key_values[i + 1];
- DefineOwnProperty(obj, key, desc, true);
- }
-}
-
-
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -511,8 +309,7 @@ function SetupObject() {
"__lookupSetter__", ObjectLookupSetter
));
InstallFunctions($Object, DONT_ENUM, $Array(
- "keys", ObjectKeys,
- "create", ObjectCreate
+ "keys", ObjectKeys
));
}
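The JavaScript removed above models an ES5 property descriptor (section 8.10) as a record whose fields each carry a "present" flag, so a descriptor can be classified as data, accessor, or generic, and rejected when it is both. A compact C++ sketch of the same classification:

  struct PropertyDescriptor {
    bool has_value = false, has_writable = false;  // data side
    bool has_getter = false, has_setter = false;   // accessor side
    bool IsData() const { return has_value || has_writable; }
    bool IsAccessor() const { return has_getter || has_setter; }
    bool IsGeneric() const { return !IsData() && !IsAccessor(); }
    bool IsInconsistent() const { return IsData() && IsAccessor(); }
  };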
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 3bcd48a8b2..d9a78a5e7d 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -86,10 +86,10 @@ void UseCount::Print() {
// ----------------------------------------------------------------------------
-// Implementation StaticType.
+// Implementation SmiAnalysis.
-const char* StaticType::Type2String(StaticType* type) {
+const char* SmiAnalysis::Type2String(SmiAnalysis* type) {
switch (type->kind_) {
case UNKNOWN:
return "UNKNOWN";
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index ac7f2940dd..ca78b5fc1a 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -65,14 +65,14 @@ class UseCount BASE_EMBEDDED {
// Variables and AST expression nodes can track their "type" to enable
// optimizations and removal of redundant checks when generating code.
-class StaticType {
+class SmiAnalysis {
public:
enum Kind {
UNKNOWN,
LIKELY_SMI
};
- StaticType() : kind_(UNKNOWN) {}
+ SmiAnalysis() : kind_(UNKNOWN) {}
bool Is(Kind kind) const { return kind_ == kind; }
@@ -80,11 +80,11 @@ class StaticType {
bool IsUnknown() const { return Is(UNKNOWN); }
bool IsLikelySmi() const { return Is(LIKELY_SMI); }
- void CopyFrom(StaticType* other) {
+ void CopyFrom(SmiAnalysis* other) {
kind_ = other->kind_;
}
- static const char* Type2String(StaticType* type);
+ static const char* Type2String(SmiAnalysis* type);
// LIKELY_SMI accessors
void SetAsLikelySmi() {
@@ -100,7 +100,7 @@ class StaticType {
private:
Kind kind_;
- DISALLOW_COPY_AND_ASSIGN(StaticType);
+ DISALLOW_COPY_AND_ASSIGN(SmiAnalysis);
};
@@ -203,7 +203,7 @@ class Variable: public ZoneObject {
Expression* rewrite() const { return rewrite_; }
Slot* slot() const;
- StaticType* type() { return &type_; }
+ SmiAnalysis* type() { return &type_; }
private:
Scope* scope_;
@@ -220,7 +220,7 @@ class Variable: public ZoneObject {
UseCount obj_uses_; // uses of the object the variable points to
// Static type information
- StaticType type_;
+ SmiAnalysis type_;
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index d43a76159a..36a42f8f30 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 0
-#define BUILD_NUMBER 5
+#define BUILD_NUMBER 3
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 85ccb676ca..36f0e635ff 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -4051,8 +4051,7 @@ void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
+ Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
frame_->Push(&answer);
}
@@ -5127,7 +5126,7 @@ void DeferredInlineBinaryOperation::Generate() {
void CodeGenerator::GenericBinaryOperation(Token::Value op,
- StaticType* type,
+ SmiAnalysis* type,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@@ -5316,7 +5315,7 @@ void DeferredInlineSmiOperation::Generate() {
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
- StaticType* type,
+ SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@@ -6099,7 +6098,7 @@ void Reference::SetValue(InitState init_state) {
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
- StaticType* key_smi_analysis = property->key()->type();
+ SmiAnalysis* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@@ -7372,28 +7371,19 @@ void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- use_sse3_ ? "SSE3" : "SSE2");
- return name_;
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
}
@@ -7806,8 +7796,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(above_equal, &string1);
// First and second argument are strings.
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&stub);
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
+ __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
// Only first argument is a string.
__ bind(&string1);
@@ -7890,234 +7880,6 @@ int CompareStub::MinorKey() {
return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
-
- // Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &string_add_runtime);
-
-    // First argument is a string, test second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
-  // Check if either of the strings is empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ testl(rcx, rcx);
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in rax.
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
- __ testl(rbx, rbx);
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
-  // rcx: length of second string
-  // rdx: second string
- // r8: instance type of first string if string check was performed above
-  // r9: instance type of second string if string check was performed above
- Label string_add_flat_result;
- __ bind(&both_not_zero_length);
- // Look at the length of the result of adding the two strings.
- __ addl(rbx, rcx);
-  // Use the runtime system when adding two one-character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ cmpl(rbx, Immediate(2));
- __ j(equal, &string_add_runtime);
-  // If arguments were known to be strings, maps are not loaded to r8 and r9
- // by the code above.
- if (!string_check_) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
- // Check if resulting string will be flat.
- __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ cmpl(rbx, Immediate(String::kMaxLength));
- __ j(above, &string_add_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ascii the result is an ascii cons string.
- // rax: first string
-  // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(rcx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
-  // Allocate an ascii cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ movq(rax, rcx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // Allocate a two byte cons string.
- __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // rax: first string
-  // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
-  // r9: instance type of second string
- __ bind(&string_add_flat_result);
- __ movl(rcx, r8);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- __ movl(rcx, r9);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // rax: first string
-  // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii_string_add_flat_result;
- ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(r8, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii_string_add_flat_result);
- __ testl(r9, Immediate(kAsciiStringTag));
- __ j(zero, &string_add_runtime);
- // Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument
- __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second string
- // rdi: length of first argument
- GenerateCopyCharacters(masm, rcx, rax, rdi, true);
- // Locate first character of second argument.
- __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
- __ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // rax: first string - known to be two byte
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
-  // r9: instance type of second string
- __ bind(&non_ascii_string_add_flat_result);
- __ and_(r9, Immediate(kAsciiStringTag));
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second argument
- // rdi: length of first argument
- GenerateCopyCharacters(masm, rcx, rax, rdi, false);
- // Locate first character of second argument.
- __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
- __ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
-}
-
-
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(1));
- __ addq(dest, Immediate(1));
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ subl(count, Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
#undef __
#define __ masm.
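The deleted StringAddStub above layers three fast paths in front of the runtime call: return the other operand when one string is empty, defer two-character results to the runtime so the symbol table can deduplicate them, and build a cons string once the length reaches kMinNonFlatLength. A library-level sketch of the first of those paths:

  #include <string>

  std::string AddStrings(const std::string& a, const std::string& b) {
    if (b.empty()) return a;  // second operand empty: result is the first
    if (a.empty()) return b;  // first operand empty: result is the second
    return a + b;             // general case: allocate and copy
  }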
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index fdace8d59d..8539884aaf 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -436,7 +436,7 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
- StaticType* type,
+ SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@@ -449,7 +449,7 @@ class CodeGenerator: public AstVisitor {
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
- StaticType* type,
+ SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
@@ -670,8 +670,7 @@ class GenericBinaryOpStub: public CodeStub {
mode_(mode),
flags_(flags),
args_in_registers_(false),
- args_reversed_(false),
- name_(NULL) {
+ args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -690,7 +689,6 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
- char* name_;
const char* GetName();
@@ -747,36 +745,6 @@ class GenericBinaryOpStub: public CodeStub {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- bool ascii);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
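The removed StringAddFlags enum is a bitmask configuration knob: the same stub compiles with or without its own string checks, and the flag feeds MinorKey() so each variant gets its own slot in the code cache. In miniature:

  enum StringAddFlags {
    NO_STRING_ADD_FLAGS = 0,
    NO_STRING_CHECK_IN_STUB = 1 << 0  // caller guarantees both args are strings
  };

  bool NeedsStringCheck(int flags) {
    return (flags & NO_STRING_CHECK_IN_STUB) == 0;
  }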
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
index 32c975c2f6..f73f2b90f9 100644
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ b/deps/v8/src/x64/fast-codegen-x64.cc
@@ -420,97 +420,73 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(Operand(rbp, SlotOffset(var->slot())));
- }
- break;
-
- case Slot::CONTEXT:
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ ASSERT(slot != NULL); // No global declarations here.
+
+ // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
+ switch (slot->type()) {
+ case Slot::LOOKUP: {
+ __ push(rsi);
+ __ Push(var->name());
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
+ PropertyAttributes attr = decl->mode() == Variable::VAR ?
+ NONE : READ_ONLY;
+ __ Push(Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ Push(Factory::the_hole_value());
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ } else {
+ __ Push(Smi::FromInt(0)); // no initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ Move(Operand(rbp, SlotOffset(var->slot())),
+ Factory::the_hole_value());
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(Operand(rbp, SlotOffset(var->slot())));
+ }
+ break;
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
+ if (decl->mode() == Variable::CONST) {
+ __ Move(rax, Factory::the_hole_value());
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
- __ movq(rbx,
- CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ movq(rbx, CodeGenerator::ContextOperand(rsi,
+ Context::FCONTEXT_INDEX));
__ cmpq(rbx, rsi);
__ Check(equal, "Unexpected declaration in current context.");
}
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
- kScratchRegister);
- // No write barrier since the hole value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(rax);
- __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
- int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(rsi, offset, rax, rcx);
- }
- break;
-
- case Slot::LOOKUP: {
- __ push(rsi);
- __ Push(var->name());
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ Push(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- } else {
- __ Push(Smi::FromInt(0)); // no initial value!
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+ // No write barrier since the_hole_value is in old space.
+ ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(rax);
- } else {
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ movq(rbx, CodeGenerator::ContextOperand(rsi,
+ Context::FCONTEXT_INDEX));
+ __ cmpq(rbx, rsi);
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+ int offset = Context::SlotOffset(slot->index());
+ __ RecordWrite(rsi, offset, rax, rcx);
}
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
-
- // Absence of a test rax instruction following the call
- // indicates that none of the load was inlined.
-
- // Value in rax is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
+ break;
+ default:
+ UNREACHABLE();
}
}
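The restored CONTEXT path above illustrates write-barrier elision: storing the hole value needs no RecordWrite because it lives in old space, while storing a freshly visited function value does. A stand-in sketch of the invariant (RecordWrite here is a placeholder for the remembered-set update the real barrier performs):

  // Stand-in for the remembered-set update done by the real write barrier.
  void RecordWrite(void** slot) { (void)slot; }

  void StoreIntoOldSpaceObject(void** slot, void* value, bool value_in_new_space) {
    *slot = value;                              // the raw store
    if (value_in_new_space) RecordWrite(slot);  // only old->new pointers need tracking
  }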
@@ -525,6 +501,20 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ Expression* expr = stmt->expression();
+ if (expr->AsLiteral() != NULL) {
+ __ Move(rax, expr->AsLiteral()->handle());
+ } else {
+ Visit(expr);
+ ASSERT_EQ(Expression::kValue, expr->context());
+ __ pop(rax);
+ }
+ EmitReturnSequence(stmt->statement_pos());
+}
+
+
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -545,20 +535,14 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
+ Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
- ASSERT(var->is_global());
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
- __ Move(rcx, var->name());
+ __ Move(rcx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A test rax instruction following the call is used by the IC to
@@ -566,7 +550,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// is no test rax instruction here.
__ nop();
- DropAndMove(context, rax);
+ DropAndMove(expr->context(), rax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@@ -587,7 +571,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
UNREACHABLE();
}
}
- Move(context, slot, rax);
+ Move(expr->context(), slot, rax);
} else {
// A variable has been rewritten into an explicit access to
// an object property.
@@ -621,7 +605,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// the call. It is treated specially by the LoadIC code.
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(context, rax, 2);
+ DropAndMove(expr->context(), rax, 2);
}
}
@@ -655,14 +639,31 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
+
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ movq(rax, FieldOperand(rbx, literal_offset));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(rbx);
+ // Literal index (1).
__ Push(Smi::FromInt(expr->literal_index()));
+ // Constant properties (2).
__ Push(expr->constant_properties());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // rax contains boilerplate.
+ // Clone boilerplate.
+ __ push(rax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
}
// If result_saved == true: The result is saved on top of the
@@ -758,14 +759,31 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ Label make_clone;
+
+ // Fetch the function's literals array.
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ // Check if the literal's boilerplate has been instantiated.
+ int offset =
+ FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+ __ movq(rax, FieldOperand(rbx, offset));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &make_clone);
+
+ // Instantiate the boilerplate.
+ __ push(rbx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->literals());
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+ __ bind(&make_clone);
+ // Clone the boilerplate.
+ __ push(rax);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
bool result_saved = false; // Is the result saved to the stack?
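The ObjectLiteral and ArrayLiteral hunks above restore a two-step literal protocol: the function's literals array caches one boilerplate per literal site (undefined until first use), and every evaluation clones that boilerplate, shallowly when the literal's depth is 1. A stand-in sketch of the caching step:

  #include <vector>

  struct Boilerplate { bool defined = false; };

  // literals: one slot per literal site in the function (stand-in types).
  Boilerplate CloneLiteral(std::vector<Boilerplate>& literals, int index) {
    if (!literals[index].defined) {
      literals[index] = Boilerplate{true};  // build the boilerplate once
    }
    return literals[index];  // each evaluation clones the cached boilerplate
  }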
@@ -835,37 +853,10 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, rax);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, rax);
-}
-
-
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Move(context, rax);
-}
-
-
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
+
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
@@ -970,6 +961,36 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
+ } else {
+ Property* property = var->AsProperty();
+ ASSERT_NOT_NULL(property);
+ // A variable has been rewritten into a property on an object.
+
+ // Load object and key onto the stack.
+ Slot* object_slot = property->obj()->AsSlot();
+ ASSERT_NOT_NULL(object_slot);
+ Move(Expression::kValue, object_slot, rax);
+
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ Move(Expression::kValue, key_literal);
+
+ // Value to store was pushed before object and key on the stack.
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+
+    // Arguments to the IC: value in rax, object and key on the stack.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+
+ if (expr->context() == Expression::kEffect) {
+ __ addq(rsp, Immediate(3 * kPointerSize));
+ } else if (expr->context() == Expression::kValue) {
+ // Value is still on the stack in rsp[2 * kPointerSize]
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ } else {
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ DropAndMove(expr->context(), rax, 3);
+ }
}
}
@@ -1076,9 +1097,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
- RelocInfo::Mode mode) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1091,7 +1110,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ call(ic, mode);
+ __ call(ic, reloc_info);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -1130,7 +1149,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
__ Push(var->name());
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1143,7 +1162,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Call to a named property, use call IC.
__ Push(key->handle());
Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1665,69 +1684,6 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), rax);
-}
-
-
-Register FastCodeGenerator::result_register() { return rax; }
-
-
-Register FastCodeGenerator::context_register() { return rsi; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset),
- static_cast<intptr_t>(frame_offset));
- __ movq(Operand(rbp, frame_offset), value);
-}
-
-
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-
-void FastCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Cook return address on top of stack (smi encoded Code* delta)
- __ movq(rdx, Operand(rsp, 0));
- __ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rsp, 0), rdx);
- // Store result register while executing finally block.
- __ push(result_register());
-}
-
-
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Restore result register from stack.
- __ pop(result_register());
- // Uncook return address.
- __ movq(rdx, Operand(rsp, 0));
- __ SmiToInteger32(rdx, rdx);
- __ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
- __ movq(Operand(rsp, 0), rdx);
- // And return.
- __ ret(0);
-}
-
-
-void FastCodeGenerator::ThrowException() {
- __ push(result_register());
- __ CallRuntime(Runtime::kThrow, 1);
-}
-
#undef __
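The deleted EnterFinallyBlock/ExitFinallyBlock pair above "cooks" the return address: it is stored as a smi-encoded offset from the code object's start rather than an absolute pointer, so a moving collector can relocate the code while a finally block is live on the stack. The core trick, minus the smi tagging:

  #include <cstdint>

  uintptr_t Cook(uintptr_t return_address, uintptr_t code_start) {
    return return_address - code_start;  // store a relocatable offset
  }

  uintptr_t Uncook(uintptr_t cooked, uintptr_t code_start) {
    return cooked + code_start;          // recover the absolute address
  }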
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index e24007559f..ccbc615bdf 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -48,13 +48,9 @@ namespace internal {
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register r0,
- Register r1,
- Register r2,
- Register name,
- DictionaryCheck check_dictionary) {
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register r0, Register r1, Register r2,
+ Register name) {
// Register use:
//
// r0 - used to hold the property dictionary.
@@ -90,14 +86,10 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(equal, miss_label);
- // Load properties array.
+ // Check that the properties array is a dictionary.
__ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
-
- if (check_dictionary == CHECK_DICTIONARY) {
- // Check that the properties array is a dictionary.
- __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
- __ j(not_equal, miss_label);
- }
+ __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
+ __ j(not_equal, miss_label);
// Compute the capacity mask.
const int kCapacityOffset =
@@ -254,8 +246,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, check_string, index_int, index_string;
- Label check_pixel_array, probe_dictionary;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@@ -328,68 +319,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
- // Is the string a symbol?
+ // If the string is a symbol, do a quick inline probe of the receiver's
+ // dictionary, if it exists.
__ j(not_zero, &index_string); // The value in rbx is used at jump target.
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movl(rdx, rbx);
- __ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rax, Immediate(String::kHashShift));
- __ xor_(rdx, rax);
- __ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
-
- // Load the key (consisting of map and symbol) from the cache and
- // check for match.
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys();
- __ movq(rdi, rdx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ movq(kScratchRegister, cache_keys);
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
- __ j(not_equal, &slow);
- __ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
- __ cmpq(Operand(rsp, kPointerSize), rdi);
- __ j(not_equal, &slow);
-
- // Get field offset which is a 32-bit integer and check that it is
- // an in-object property.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets();
- __ movq(kScratchRegister, cache_field_offsets);
- __ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
- __ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ cmpq(rax, rdx);
- __ j(above_equal, &slow);
-
- // Load in-object property.
- __ subq(rax, rdx);
- __ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rax, rdx);
- __ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- GenerateDictionaryLoad(masm,
- &slow,
- rbx,
- rcx,
- rdx,
- rax,
- DICTIONARY_CHECK_DONE);
+ // Probe the dictionary leaving result in rcx.
+ GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
@@ -916,7 +853,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ ExternalReference const& f) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Get the name of the function to call from the stack.
@@ -933,7 +872,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ movq(rax, Immediate(2));
- __ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
+ __ movq(rbx, f);
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@@ -1024,7 +963,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -1032,8 +971,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
- // Search dictionary - put result in register rdx.
- GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
+ // Search dictionary - put result in register edx.
+ GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@@ -1126,7 +1065,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- GenerateMiss(masm, argc);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -1257,9 +1196,9 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
- // Search the dictionary placing the result in rax.
+ // Search the dictionary placing the result in eax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
__ ret(0);
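The ic-x64.cc hunk above removes the keyed-lookup-cache probe: a small table keyed by a (map, symbol) hash that caches in-object field offsets, letting repeated keyed loads of the same property skip the dictionary walk. A stand-in sketch of such a cache, with hypothetical size and hash function:

  #include <cstdint>

  constexpr int kCapacity = 64;  // power of two, so masking replaces modulo
  struct Entry { const void* map; const void* name; int field_offset; };
  static Entry cache[kCapacity];

  int Lookup(const void* map, const void* name) {
    uintptr_t m = reinterpret_cast<uintptr_t>(map);
    uintptr_t n = reinterpret_cast<uintptr_t>(name);
    Entry& e = cache[((m ^ n) >> 3) & (kCapacity - 1)];
    return (e.map == map && e.name == name) ? e.field_offset : -1;  // -1: miss
  }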
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 5cf09f2675..71157914c2 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -310,12 +310,6 @@ void MacroAssembler::CallStub(CodeStub* stub) {
}
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -1345,13 +1339,6 @@ void MacroAssembler::Push(Smi* source) {
}
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
void MacroAssembler::Test(const Operand& src, Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@@ -1438,16 +1425,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
-void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- // Unlink this handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- pop(Operand(kScratchRegister, 0));
- // Remove the remaining fields.
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
void MacroAssembler::Ret() {
ret(0);
}
@@ -2267,108 +2244,6 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- movl(FieldOperand(result, String::kLengthOffset), length);
- movl(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- movl(scratch1, length);
- ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate ascii string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- movl(FieldOperand(result, String::kLengthOffset), length);
- movl(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate cons string object in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate cons string object in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
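The deleted `AllocateTwoByteString` rounds the character payload up to the object alignment before handing the size to `AllocateInNewSpace`; that is all the `lea`/`and_` pair does. Assuming the usual 8-byte alignment on x64 (`kObjectAlignmentMask == 7` is an assumption here, not quoted from this tree), the computation is just:

```js
const kObjectAlignmentMask = 7;  // assumption: 8-byte object alignment

// Payload size for a two-byte string: length * 2, rounded up to alignment.
const twoByteStringPayload = (length) =>
    (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;

console.assert(twoByteStringPayload(3) === 8);   //  6 bytes -> 8
console.assert(twoByteStringPayload(4) === 8);   //  8 bytes -> 8
console.assert(twoByteStringPayload(5) === 16);  // 10 bytes -> 16
```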
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 97200051a3..9e7c25c955 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -400,7 +400,7 @@ class MacroAssembler: public Assembler {
void Test(const Operand& dst, Smi* source);
// ---------------------------------------------------------------------------
- // Macro instructions.
+ // Macro instructions
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
@@ -412,8 +412,6 @@ class MacroAssembler: public Assembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
- void Drop(int stack_elements);
- void Call(Label* target) { call(target); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -445,8 +443,6 @@ class MacroAssembler: public Assembler {
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -522,32 +518,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* gc_required);
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -587,9 +557,6 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub);
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 32bd3450fb..55b0b87cdf 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -956,24 +956,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rcx, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rcx);
- __ j(not_equal, &miss);
- } else {
- __ Cmp(rdi, Handle<JSFunction>(function));
- __ j(not_equal, &miss);
- }
+ __ Cmp(rdi, Handle<JSFunction>(function));
+ __ j(not_equal, &miss);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@@ -1003,10 +987,10 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
- AccessorInfo* callback) {
+ AccessorInfo* callback,
+ String* name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1015,11 +999,8 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
- callback, name, &miss, &failure);
- if (!success) return failure;
-
+ GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+ callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1173,11 +1154,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
- callback, name, &miss, &failure);
- if (!success) return failure;
-
+ GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+ callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1632,7 +1610,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
+void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -1640,8 +1618,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss,
- Failure** failure) {
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
@@ -1664,8 +1641,6 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
-
- return true;
}
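The comment removed from `CompileCallGlobal` above carries the interesting reasoning: a pointer to a new-space function cannot be embedded in code, so the old fast path validated the `SharedFunctionInfo` instead, which as a side effect let every closure of the same function share one call IC. A toy JavaScript model of that cache policy; the `shared` property and `makeIC` helper are illustrative, not V8 API:

```js
function makeIC() {
  let cachedShared = null;
  return function call(fn) {
    if (cachedShared === null) cachedShared = fn.shared;  // first call: populate
    return cachedShared === fn.shared;                    // later calls: validate
  };
}

const shared = { code: '(function f() {})' };  // one function's shared info
const closureA = { shared };                   // two distinct closures...
const closureB = { shared };                   // ...sharing that info
const ic = makeIC();
console.assert(ic(closureA));                  // first call populates the cache
console.assert(ic(closureB));                  // a sibling closure still hits
```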
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 3e3c9578e3..6d6c174fd0 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -447,40 +447,6 @@ THREADED_TEST(UsingExternalAsciiString) {
}
-THREADED_TEST(ScavengeExternalString) {
- TestResource::dispose_count = 0;
- {
- v8::HandleScope scope;
- uint16_t* two_byte_string = AsciiToTwoByteString("test string");
- Local<String> string =
- String::NewExternal(new TestResource(two_byte_string));
- i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- i::Heap::CollectGarbage(0, i::NEW_SPACE);
- CHECK(i::Heap::InNewSpace(*istring));
- CHECK_EQ(0, TestResource::dispose_count);
- }
- i::Heap::CollectGarbage(0, i::NEW_SPACE);
- CHECK_EQ(1, TestResource::dispose_count);
-}
-
-
-THREADED_TEST(ScavengeExternalAsciiString) {
- TestAsciiResource::dispose_count = 0;
- {
- v8::HandleScope scope;
- const char* one_byte_string = "test string";
- Local<String> string = String::NewExternal(
- new TestAsciiResource(i::StrDup(one_byte_string)));
- i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- i::Heap::CollectGarbage(0, i::NEW_SPACE);
- CHECK(i::Heap::InNewSpace(*istring));
- CHECK_EQ(0, TestAsciiResource::dispose_count);
- }
- i::Heap::CollectGarbage(0, i::NEW_SPACE);
- CHECK_EQ(1, TestAsciiResource::dispose_count);
-}
-
-
THREADED_TEST(StringConcat) {
{
v8::HandleScope scope;
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index cad1ba3ae5..5b7219301e 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -3141,39 +3141,6 @@ TEST(DisableBreak) {
CheckDebuggerUnloaded();
}
-static const char* kSimpleExtensionSource =
- "(function Foo() {"
- " return 4;"
- "})() ";
-
-// http://crbug.com/28933
-// Test that debug break is disabled when bootstrapper is active.
-TEST(NoBreakWhenBootstrapping) {
- v8::HandleScope scope;
-
- // Register a debug event listener which sets the break flag and counts.
- v8::Debug::SetDebugEventListener(DebugEventCounter);
-
- // Set the debug break flag.
- v8::Debug::DebugBreak();
- break_point_hit_count = 0;
- {
- // Create a context with an extension to make sure that some JavaScript
- // code is executed during bootstrapping.
- v8::RegisterExtension(new v8::Extension("simpletest",
- kSimpleExtensionSource));
- const char* extension_names[] = { "simpletest" };
- v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
- context.Dispose();
- }
- // Check that no DebugBreak events occurred during the context creation.
- CHECK_EQ(0, break_point_hit_count);
-
- // Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
-}
static v8::Handle<v8::Array> NamedEnum(const v8::AccessorInfo&) {
v8::Handle<v8::Array> result = v8::Array::New(3);
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 3b8905bb7c..511b933a50 100755
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -91,14 +91,14 @@ typedef int (*F0)();
TEST(Smi) {
// Check that C++ Smi operations work as expected.
- int64_t test_numbers[] = {
+ intptr_t test_numbers[] = {
0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
- Smi::kMaxValue, static_cast<int64_t>(Smi::kMaxValue) + 1,
- Smi::kMinValue, static_cast<int64_t>(Smi::kMinValue) - 1
+ Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
+ Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
};
int test_number_count = 15;
for (int i = 0; i < test_number_count; i++) {
- int64_t number = test_numbers[i];
+ intptr_t number = test_numbers[i];
bool is_valid = Smi::IsValid(number);
bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
CHECK_EQ(is_in_range, is_valid);
@@ -108,8 +108,8 @@ TEST(Smi) {
Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
CHECK_EQ(smi_from_int, smi_from_intptr);
}
- int64_t smi_value = smi_from_intptr->value();
- CHECK_EQ(number, smi_value);
+ int smi_value = smi_from_intptr->value();
+ CHECK_EQ(number, static_cast<intptr_t>(smi_value));
}
}
}
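The `TEST(Smi)` change above is about pointer width, not semantics: `intptr_t` keeps the probe values compilable on 32-bit hosts as well. The range check the test exercises is simple; assuming the 31-bit smi payload of the 32-bit port (the real bounds come from `Smi::kMinValue`/`Smi::kMaxValue`), it amounts to:

```js
const kSmiMaxValue = 2 ** 30 - 1;   // assumption: 31-bit smi payload
const kSmiMinValue = -(2 ** 30);

const isValidSmi = (n) =>
    Number.isInteger(n) && n >= kSmiMinValue && n <= kSmiMaxValue;

console.assert(isValidSmi(kSmiMaxValue) && !isValidSmi(kSmiMaxValue + 1));
console.assert(isValidSmi(kSmiMinValue) && !isValidSmi(kSmiMinValue - 1));
```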
diff --git a/deps/v8/test/mjsunit/compiler/thisfunction.js b/deps/v8/test/mjsunit/compiler/thisfunction.js
deleted file mode 100644
index 2af846f3e0..0000000000
--- a/deps/v8/test/mjsunit/compiler/thisfunction.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --always_fast_compiler
-
-// Test reference to this-function.
-
-var g = (function f(x) {
- if (x == 1) return 42; else return f(1);
- })(0);
-assertEquals(42, g);
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index cd5066767b..f495c72787 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -129,6 +129,7 @@ var knownProblems = {
"Log": true,
"DeclareGlobals": true,
+ "CollectStackTrace": true,
"PromoteScheduledException": true,
"DeleteHandleScopeExtensions": true
};
diff --git a/deps/v8/test/mjsunit/math-min-max.js b/deps/v8/test/mjsunit/math-min-max.js
index f9475d6fa5..0ed991203c 100644
--- a/deps/v8/test/mjsunit/math-min-max.js
+++ b/deps/v8/test/mjsunit/math-min-max.js
@@ -25,48 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-
// Test Math.min().
-assertEquals(Infinity, Math.min());
+assertEquals(Number.POSITIVE_INFINITY, Math.min());
assertEquals(1, Math.min(1));
assertEquals(1, Math.min(1, 2));
assertEquals(1, Math.min(2, 1));
assertEquals(1, Math.min(1, 2, 3));
assertEquals(1, Math.min(3, 2, 1));
assertEquals(1, Math.min(2, 3, 1));
-assertEquals(1.1, Math.min(1.1, 2.2, 3.3));
-assertEquals(1.1, Math.min(3.3, 2.2, 1.1));
-assertEquals(1.1, Math.min(2.2, 3.3, 1.1));
-
-// Prepare a non-Smi zero value.
-function returnsNonSmi(){ return 0.25; }
-var ZERO = returnsNonSmi() - returnsNonSmi();
-assertEquals(0, ZERO);
-assertEquals(Infinity, 1/ZERO);
-assertEquals(-Infinity, 1/-ZERO);
-assertFalse(%_IsSmi(ZERO));
-assertFalse(%_IsSmi(-ZERO));
var o = {};
o.valueOf = function() { return 1; };
assertEquals(1, Math.min(2, 3, '1'));
assertEquals(1, Math.min(3, o, 2));
-assertEquals(1, Math.min(o, 2));
-assertEquals(-Infinity, Infinity / Math.min(-0, +0));
-assertEquals(-Infinity, Infinity / Math.min(+0, -0));
-assertEquals(-Infinity, Infinity / Math.min(+0, -0, 1));
-assertEquals(-Infinity, Infinity / Math.min(-0, ZERO));
-assertEquals(-Infinity, Infinity / Math.min(ZERO, -0));
-assertEquals(-Infinity, Infinity / Math.min(ZERO, -0, 1));
+assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(-0, +0));
+assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(+0, -0));
+assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(+0, -0, 1));
assertEquals(-1, Math.min(+0, -0, -1));
assertEquals(-1, Math.min(-1, +0, -0));
assertEquals(-1, Math.min(+0, -1, -0));
assertEquals(-1, Math.min(-0, -1, +0));
-assertNaN(Math.min('oxen'));
-assertNaN(Math.min('oxen', 1));
-assertNaN(Math.min(1, 'oxen'));
+
// Test Math.max().
@@ -78,28 +58,15 @@ assertEquals(2, Math.max(2, 1));
assertEquals(3, Math.max(1, 2, 3));
assertEquals(3, Math.max(3, 2, 1));
assertEquals(3, Math.max(2, 3, 1));
-assertEquals(3.3, Math.max(1.1, 2.2, 3.3));
-assertEquals(3.3, Math.max(3.3, 2.2, 1.1));
-assertEquals(3.3, Math.max(2.2, 3.3, 1.1));
var o = {};
o.valueOf = function() { return 3; };
assertEquals(3, Math.max(2, '3', 1));
assertEquals(3, Math.max(1, o, 2));
-assertEquals(3, Math.max(o, 1));
-assertEquals(Infinity, Infinity / Math.max(-0, +0));
-assertEquals(Infinity, Infinity / Math.max(+0, -0));
-assertEquals(Infinity, Infinity / Math.max(+0, -0, -1));
-assertEquals(Infinity, Infinity / Math.max(-0, ZERO));
-assertEquals(Infinity, Infinity / Math.max(ZERO, -0));
-assertEquals(Infinity, Infinity / Math.max(ZERO, -0, -1));
+assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(-0, +0));
+assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(+0, -0));
+assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(+0, -0, -1));
assertEquals(1, Math.max(+0, -0, +1));
assertEquals(1, Math.max(+1, +0, -0));
assertEquals(1, Math.max(+0, +1, -0));
-assertEquals(1, Math.max(-0, +1, +0));
-assertNaN(Math.max('oxen'));
-assertNaN(Math.max('oxen', 1));
-assertNaN(Math.max(1, 'oxen'));
-
-assertEquals(Infinity, 1/Math.max(ZERO, -0));
-assertEquals(Infinity, 1/Math.max(-0, ZERO));
+assertEquals(1, Math.max(-0, +1, +0));
\ No newline at end of file
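The surviving assertions still lean on a subtle trick worth spelling out: `+0` and `-0` compare equal, so dividing `Infinity` by the result is the only way for the test to observe which zero `Math.min`/`Math.max` returned. Standard JavaScript, runnable as-is:

```js
console.assert(+0 === -0);             // equality cannot tell the zeros apart
console.assert(1 / +0 === Infinity);
console.assert(1 / -0 === -Infinity);  // ...but the sign survives division
console.assert(Infinity / Math.min(-0, +0) === -Infinity);  // min returns -0
console.assert(Infinity / Math.max(-0, +0) === Infinity);   // max returns +0
```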
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 41388a37f8..8eb59b7e2f 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -45,8 +45,6 @@ array-constructor: PASS || TIMEOUT
# Very slow on ARM, contains no architecture dependent code.
unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
-# Skip long running test in debug.
-regress/regress-524: PASS, SKIP if $mode == debug
[ $arch == arm ]
diff --git a/deps/v8/test/mjsunit/object-create.js b/deps/v8/test/mjsunit/object-create.js
deleted file mode 100644
index d8385842a3..0000000000
--- a/deps/v8/test/mjsunit/object-create.js
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test ES5 section 15.2.3.5 Object.create.
-// We do not support nonconfigurable properties on objects so that is not
-// tested. We do test getters, setters, writable, enumerable and value.
-
-// Check that no exceptions are thrown.
-Object.create(null);
-Object.create(null, undefined);
-
-// Check that the right exception is thrown.
-try {
- Object.create(4);
- assertTrue(false);
-} catch (e) {
- assertTrue(/Object or null/.test(e));
-}
-
-try {
- Object.create("foo");
- print(2);
- assertTrue(false);
-} catch (e) {
- assertTrue(/Object or null/.test(e));
-}
-
-var ctr = 0;
-var ctr2 = 0;
-var ctr3 = 0;
-var ctr4 = 0;
-var ctr5 = 0;
-var ctr6 = 1000;
-
-var protoFoo = { foo: function() { ctr++; }};
-var fooValue = { foo: { writable: true, value: function() { ctr2++; }}};
-var fooGetter = { foo: { get: function() { return ctr3++; }}};
-var fooSetter = { foo: { set: function() { return ctr4++; }}};
-var fooAmbiguous = { foo: { get: function() { return ctr3++; },
- value: 3 }};
-
-function valueGet() { ctr5++; return 3 };
-function getterGet() { ctr5++; return function() { return ctr6++; }; };
-
-// Simple object with prototype, no properties added.
-Object.create(protoFoo).foo();
-assertEquals(1, ctr);
-
-// Simple object with object with prototype, no properties added.
-Object.create(Object.create(protoFoo)).foo();
-assertEquals(2, ctr);
-
-// Add a property foo that returns a function.
-var v = Object.create(protoFoo, fooValue);
-v.foo();
-assertEquals(2, ctr);
-assertEquals(1, ctr2);
-
-// Ensure the property is writable.
-v.foo = 42;
-assertEquals(42, v.foo);
-assertEquals(2, ctr);
-assertEquals(1, ctr2);
-
-// Ensure by default properties are not writable.
-v = Object.create(null, { foo: {value: 103}});
-assertEquals(103, v.foo);
-v.foo = 42;
-assertEquals(103, v.foo);
-
-// Add a getter foo that returns a counter value.
-assertEquals(0, Object.create(protoFoo, fooGetter).foo);
-assertEquals(2, ctr);
-assertEquals(1, ctr2);
-assertEquals(1, ctr3);
-
-// Add a setter foo that runs a function.
-assertEquals(1, Object.create(protoFoo, fooSetter).foo = 1);
-assertEquals(2, ctr);
-assertEquals(1, ctr2);
-assertEquals(1, ctr3);
-assertEquals(1, ctr4);
-
-// Make sure that trying to add both a value and a getter
-// will result in an exception.
-try {
- Object.create(protoFoo, fooAmbiguous);
- assertTrue(false);
-} catch (e) {
- assertTrue(/Invalid property/.test(e));
-}
-assertEquals(2, ctr);
-assertEquals(1, ctr2);
-assertEquals(1, ctr3);
-assertEquals(1, ctr4);
-
-var ctr7 = 0;
-
-var metaProps = {
- enumerable: { get: function() {
- assertEquals(0, ctr7++);
- return true;
- }},
- configurable: { get: function() {
- assertEquals(1, ctr7++);
- return true;
- }},
- value: { get: function() {
- assertEquals(2, ctr7++);
- return 4;
- }},
- writable: { get: function() {
- assertEquals(3, ctr7++);
- return true;
- }},
- get: { get: function() {
- assertEquals(4, ctr7++);
- return function() { };
- }},
- set: { get: function() {
- assertEquals(5, ctr7++);
- return function() { };
- }}
-};
-
-
-// Instead of a plain props object, let's use getters to return its properties.
-var magicValueProps = { foo: Object.create(null, { value: { get: valueGet }})};
-var magicGetterProps = { foo: Object.create(null, { get: { get: getterGet }})};
-var magicAmbiguousProps = { foo: Object.create(null, metaProps) };
-
-assertEquals(3, Object.create(null, magicValueProps).foo);
-assertEquals(1, ctr5);
-
-assertEquals(1000, Object.create(null, magicGetterProps).foo);
-assertEquals(2, ctr5);
-
-// See if we do the steps in ToPropertyDescriptor in the right order.
-// We shouldn't throw the exception for an ambiguous properties object
-// before we got all the values out.
-try {
- Object.create(null, magicAmbiguousProps);
- assertTrue(false);
-} catch (e) {
- assertTrue(/Invalid property/.test(e));
- assertEquals(6, ctr7);
-}
-
-var magicWritableProps = {
- foo: Object.create(null, { value: { value: 4 },
- writable: { get: function() {
- ctr6++;
- return false;
- }}})};
-
-var fooNotWritable = Object.create(null, magicWritableProps)
-assertEquals(1002, ctr6);
-assertEquals(4, fooNotWritable.foo);
-fooNotWritable.foo = 5;
-assertEquals(4, fooNotWritable.foo);
-
-
-// Test enumerable flag.
-
-var fooNotEnumerable =
- Object.create({fizz: 14}, {foo: {value: 3, enumerable: false},
- bar: {value: 4, enumerable: true},
- baz: {value: 5}});
-var sum = 0;
-for (x in fooNotEnumerable) {
- assertTrue(x === 'bar' || x === 'fizz');
- sum += fooNotEnumerable[x];
-}
-assertEquals(18, sum);
-
-
-try {
- Object.create(null, {foo: { get: 0 }});
- assertTrue(false);
-} catch (e) {
- assertTrue(/Getter must be a function/.test(e));
-}
-
-try {
- Object.create(null, {foo: { set: 0 }});
- assertTrue(false);
-} catch (e) {
- assertTrue(/Setter must be a function/.test(e));
-}
-
-try {
- Object.create(null, {foo: { set: 0, get: 0 }});
- assertTrue(false);
-} catch (e) {
- assertTrue(/Getter must be a function/.test(e));
-}
-
-
-// Ensure that only enumerable own properties on the descriptor are used.
-var tricky = Object.create(
- { foo: { value: 1, enumerable: true }},
- { bar: { value: { value: 2, enumerable: true }, enumerable: false },
- baz: { value: { value: 4, enumerable: false }, enumerable: true },
- fizz: { value: { value: 8, enumerable: false }, enumerable: false },
- buzz: { value: { value: 16, enumerable: true }, enumerable: true }});
-
-assertEquals(1, tricky.foo.value);
-assertEquals(2, tricky.bar.value);
-assertEquals(4, tricky.baz.value);
-assertEquals(8, tricky.fizz.value);
-assertEquals(16, tricky.buzz.value);
-
-var sonOfTricky = Object.create(null, tricky);
-
-assertFalse("foo" in sonOfTricky);
-assertFalse("bar" in sonOfTricky);
-assertTrue("baz" in sonOfTricky);
-assertFalse("fizz" in sonOfTricky);
-assertTrue("buzz" in sonOfTricky);
-
-var sum = 0;
-for (x in sonOfTricky) {
- assertTrue(x === 'buzz');
- sum += sonOfTricky[x];
-}
-assertEquals(16, sum);
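The ordering check in the deleted test above is the part most easily lost: `ToPropertyDescriptor` must read every descriptor field (invoking its getters) before it rejects an ambiguous value-plus-getter descriptor. A compact, standalone version of that observation:

```js
const order = [];
const descriptor = {
  get enumerable() { order.push('enumerable'); return true; },
  get value()      { order.push('value');      return 1; },
  get get()        { order.push('get');        return function () {}; },
};

let threw = false;
try {
  Object.create(null, { foo: descriptor });
} catch (e) {
  threw = true;  // value + get together are invalid...
}
console.assert(threw);
// ...but the rejection comes only after every field was read, in spec order.
console.assert(order.join(',') === 'enumerable,value,get');
```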
diff --git a/deps/v8/test/mjsunit/regress/regress-524.js b/deps/v8/test/mjsunit/regress/regress-524.js
deleted file mode 100644
index b37ad8ad7f..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-524.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test allocation of a large number of maps.
-
-var i = 500000
-var a = new Array(i)
-for (var j = 0; j < i; j++) { var o = {}; o.x = 42; delete o.x; a[j] = o; }
diff --git a/deps/v8/test/mjsunit/regress/regress-540.js b/deps/v8/test/mjsunit/regress/regress-540.js
deleted file mode 100644
index c40fa2cb17..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-540.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test context slot declarations in the arguments object.
-// See http://code.google.com/p/v8/issues/detail?id=540
-
-function f(x, y) { eval(x); return y(); }
-var result = f("function y() { return 1; }", function () { return 0; })
-assertEquals(1, result);
-
-result =
- (function (x) {
- function x() { return 3; }
- return x();
- })(function () { return 2; });
-assertEquals(3, result);
-
-result =
- (function (x) {
- function x() { return 5; }
- return arguments[0]();
- })(function () { return 4; });
-assertEquals(5, result);
diff --git a/deps/v8/test/mjsunit/regress/regress-545.js b/deps/v8/test/mjsunit/regress/regress-545.js
deleted file mode 100644
index 36cde6ddf3..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-545.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// See: http://code.google.com/p/v8/issues/detail?id=545
-// and: http://code.google.com/p/chromium/issues/detail?id=28353
-
-// The "this" variable proxy was reused. If context annotations differ between
-// uses, this can cause a use in a value context to assume a test context. Since
-// it has no true/false labels set, it causes a null-pointer dereference and
-// segmentation fault.
-
-// Code should not crash:
-
-// Original bug report by Robert Swiecki (wrapped to not throw):
-try {
- new IsPrimitive(load())?this.join():String('&#10;').charCodeAt((!this>Math));
-} catch (e) {}
-
-// Shorter examples:
-
-this + !this;
-
-this + (this ? 1 : 2);
diff --git a/deps/v8/test/mjsunit/try.js b/deps/v8/test/mjsunit/try.js
index 794860a7c6..0bd78b4332 100644
--- a/deps/v8/test/mjsunit/try.js
+++ b/deps/v8/test/mjsunit/try.js
@@ -347,48 +347,3 @@ assertTrue(broke);
assertFalse(caught);
assertTrue(finalized);
-function return_from_nested_finally_in_finally() {
- try {
- return 1;
- } finally {
- try {
- return 2;
- } finally {
- return 42;
- }
- }
-}
-
-assertEquals(42, return_from_nested_finally_in_finally());
-
-function break_from_nested_finally_in_finally() {
- L: try {
- return 1;
- } finally {
- try {
- return 2;
- } finally {
- break L;
- }
- }
- return 42;
-}
-
-assertEquals(42, break_from_nested_finally_in_finally());
-
-function continue_from_nested_finally_in_finally() {
- do {
- try {
- return 1;
- } finally {
- try {
- return 2;
- } finally {
- continue;
- }
- }
- } while (false);
- return 42;
-}
-
-assertEquals(42, continue_from_nested_finally_in_finally());
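The deleted `try.js` cases pin down one rule: a control transfer inside a `finally` block silently discards any pending completion from the `try` (or from an outer `finally`). The smallest demonstration:

```js
function f() {
  try { return 'from try'; } finally { return 'from finally'; }
}
console.assert(f() === 'from finally');  // the finally's return wins
```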
diff --git a/deps/v8/tools/stats-viewer.py b/deps/v8/tools/stats-viewer.py
index 14b214768f..bd6a8fb913 100755
--- a/deps/v8/tools/stats-viewer.py
+++ b/deps/v8/tools/stats-viewer.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -37,7 +35,6 @@ in a window, re-reading and re-displaying with regular intervals.
import mmap
import os
-import re
import struct
import sys
import time
@@ -52,9 +49,8 @@ UPDATE_INTERVAL_MS = 100
COUNTER_LABELS = {"t": "%i ms.", "c": "%i"}
-# The magic numbers used to check if a file is not a counters file
+# The magic number used to check if a file is not a counters file
COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE
-CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313
class StatsViewer(object):
@@ -96,31 +92,17 @@ class StatsViewer(object):
something goes wrong print an informative message and exit the
program."""
if not os.path.exists(self.data_name):
- maps_name = "/proc/%s/maps" % self.data_name
- if not os.path.exists(maps_name):
- print "\"%s\" is neither a counter file nor a PID." % self.data_name
- sys.exit(1)
- maps_file = open(maps_name, "r")
- try:
- m = re.search(r"/dev/shm/\S*", maps_file.read())
- if m is not None and os.path.exists(m.group(0)):
- self.data_name = m.group(0)
- else:
- print "Can't find counter file in maps for PID %s." % self.data_name
- sys.exit(1)
- finally:
- maps_file.close()
+ print "File %s doesn't exist." % self.data_name
+ sys.exit(1)
data_file = open(self.data_name, "r")
size = os.fstat(data_file.fileno()).st_size
fileno = data_file.fileno()
self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ)
data_access = SharedDataAccess(self.shared_mmap)
- if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER:
- return CounterCollection(data_access)
- elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
- return ChromeCounterCollection(data_access)
- print "File %s is not stats data." % self.data_name
- sys.exit(1)
+ if data_access.IntAt(0) != COUNTERS_FILE_MAGIC_NUMBER:
+ print "File %s is not stats data." % self.data_name
+ sys.exit(1)
+ return CounterCollection(data_access)
def CleanUp(self):
"""Cleans up the memory mapped file if necessary."""
@@ -374,72 +356,6 @@ class CounterCollection(object):
return 4 + self.max_name_size
-class ChromeCounter(object):
- """A pointer to a single counter withing a binary counters file."""
-
- def __init__(self, data, name_offset, value_offset):
- """Create a new instance.
-
- Args:
- data: the shared data access object containing the counter
- name_offset: the byte offset of the start of this counter's name
- value_offset: the byte offset of the start of this counter's value
- """
- self.data = data
- self.name_offset = name_offset
- self.value_offset = value_offset
-
- def Value(self):
- """Return the integer value of this counter."""
- return self.data.IntAt(self.value_offset)
-
- def Name(self):
- """Return the ascii name of this counter."""
- result = ""
- index = self.name_offset
- current = self.data.ByteAt(index)
- while current:
- result += chr(current)
- index += 1
- current = self.data.ByteAt(index)
- return result
-
-
-class ChromeCounterCollection(object):
- """An overlay over a counters file that provides access to the
- individual counters contained in the file."""
-
- _HEADER_SIZE = 4 * 4
- _NAME_SIZE = 32
-
- def __init__(self, data):
- """Create a new instance.
-
- Args:
- data: the shared data access object
- """
- self.data = data
- self.max_counters = data.IntAt(8)
- self.max_threads = data.IntAt(12)
- self.counter_names_offset = \
- self._HEADER_SIZE + self.max_threads * (self._NAME_SIZE + 2 * 4)
- self.counter_values_offset = \
- self.counter_names_offset + self.max_counters * self._NAME_SIZE
-
- def CountersInUse(self):
- """Return the number of counters in active use."""
- for i in xrange(self.max_counters):
- if self.data.ByteAt(self.counter_names_offset + i * self._NAME_SIZE) == 0:
- return i
- return self.max_counters
-
- def Counter(self, i):
- """Return the i'th counter."""
- return ChromeCounter(self.data,
- self.counter_names_offset + i * self._NAME_SIZE,
- self.counter_values_offset + i * self.max_threads * 4)
-
-
def Main(data_file):
"""Run the stats counter.
@@ -451,6 +367,6 @@ def Main(data_file):
if __name__ == "__main__":
if len(sys.argv) != 2:
- print "Usage: stats-viewer.py <stats data>|<test_shell pid>"
+ print "Usage: stats-viewer.py <stats data>"
sys.exit(1)
Main(sys.argv[1])
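For reference, the deleted `ChromeCounterCollection` encodes a fixed layout: a 16-byte header, then `max_threads` thread slots (a 32-byte name plus two 4-byte fields each), then `max_counters` 32-byte counter names, then the per-thread 4-byte value words. The same offset arithmetic, restated as plain JavaScript:

```js
const HEADER_SIZE = 4 * 4;  // _HEADER_SIZE
const NAME_SIZE = 32;       // _NAME_SIZE

const counterNamesOffset = (maxThreads) =>
    HEADER_SIZE + maxThreads * (NAME_SIZE + 2 * 4);
const counterValuesOffset = (maxThreads, maxCounters) =>
    counterNamesOffset(maxThreads) + maxCounters * NAME_SIZE;

console.assert(counterNamesOffset(1) === 56);       // 16 + 40
console.assert(counterValuesOffset(1, 2) === 120);  // 56 + 2 * 32
```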