summaryrefslogtreecommitdiff
path: root/deps/v8
diff options
context:
space:
mode:
authorRyan Dahl <ry@tinyclouds.org>2010-03-10 10:50:46 -0800
committerRyan Dahl <ry@tinyclouds.org>2010-03-10 10:50:46 -0800
commit073947c150316cfc0bd440851e590663c3b67814 (patch)
tree892fc64c0d5cdfd021c14af01a631f6b6c91b982 /deps/v8
parentc2c0cfb75f46ddcb3a0900f244966764d2640240 (diff)
downloadnode-new-073947c150316cfc0bd440851e590663c3b67814.tar.gz
Upgrade V8 to 2.1.3
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/ChangeLog19
-rw-r--r--deps/v8/SConstruct40
-rw-r--r--deps/v8/include/v8.h36
-rwxr-xr-xdeps/v8/src/SConscript2
-rw-r--r--deps/v8/src/accessors.cc1
-rw-r--r--deps/v8/src/api.cc62
-rw-r--r--deps/v8/src/arm/assembler-arm.cc94
-rw-r--r--deps/v8/src/arm/assembler-arm.h172
-rw-r--r--deps/v8/src/arm/builtins-arm.cc4
-rw-r--r--deps/v8/src/arm/codegen-arm.cc347
-rw-r--r--deps/v8/src/arm/codegen-arm.h43
-rw-r--r--deps/v8/src/arm/fast-codegen-arm.cc4
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc1
-rw-r--r--deps/v8/src/arm/ic-arm.cc21
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc64
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h28
-rw-r--r--deps/v8/src/arm/regexp-macro-assembler-arm.cc4
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc12
-rw-r--r--deps/v8/src/arm/virtual-frame-arm.cc21
-rw-r--r--deps/v8/src/arm/virtual-frame-arm.h6
-rw-r--r--deps/v8/src/array.js2
-rw-r--r--deps/v8/src/assembler.cc10
-rw-r--r--deps/v8/src/assembler.h3
-rw-r--r--deps/v8/src/ast.cc5
-rw-r--r--deps/v8/src/ast.h61
-rw-r--r--deps/v8/src/bootstrapper.cc13
-rw-r--r--deps/v8/src/builtins.cc376
-rw-r--r--deps/v8/src/code-stubs.cc15
-rw-r--r--deps/v8/src/code-stubs.h11
-rw-r--r--deps/v8/src/codegen.cc5
-rw-r--r--deps/v8/src/compilation-cache.cc70
-rwxr-xr-xdeps/v8/src/compiler.cc38
-rw-r--r--deps/v8/src/compiler.h7
-rw-r--r--deps/v8/src/contexts.h2
-rw-r--r--deps/v8/src/conversions-inl.h26
-rw-r--r--deps/v8/src/conversions.h3
-rw-r--r--deps/v8/src/data-flow.cc906
-rw-r--r--deps/v8/src/data-flow.h391
-rw-r--r--deps/v8/src/date-delay.js29
-rw-r--r--deps/v8/src/debug-delay.js50
-rw-r--r--deps/v8/src/debug.cc54
-rw-r--r--deps/v8/src/debug.h7
-rw-r--r--deps/v8/src/factory.h3
-rw-r--r--deps/v8/src/fast-codegen.h1
-rw-r--r--deps/v8/src/flag-definitions.h9
-rw-r--r--deps/v8/src/frame-element.cc1
-rw-r--r--deps/v8/src/frame-element.h33
-rw-r--r--deps/v8/src/frames.cc1
-rw-r--r--deps/v8/src/globals.h2
-rw-r--r--deps/v8/src/handles.cc15
-rw-r--r--deps/v8/src/handles.h8
-rw-r--r--deps/v8/src/heap-inl.h12
-rw-r--r--deps/v8/src/heap-profiler.cc1
-rw-r--r--deps/v8/src/heap-profiler.h2
-rw-r--r--deps/v8/src/heap.cc119
-rw-r--r--deps/v8/src/heap.h63
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc114
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h19
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc95
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc1750
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h97
-rw-r--r--deps/v8/src/ia32/debug-ia32.cc9
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc44
-rw-r--r--deps/v8/src/ia32/fast-codegen-ia32.cc1
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc37
-rw-r--r--deps/v8/src/ia32/ic-ia32.cc279
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc57
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h31
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.cc49
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.h15
-rw-r--r--deps/v8/src/ia32/register-allocator-ia32.cc1
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc19
-rw-r--r--deps/v8/src/ia32/virtual-frame-ia32.cc154
-rw-r--r--deps/v8/src/ia32/virtual-frame-ia32.h20
-rw-r--r--deps/v8/src/ic.cc145
-rw-r--r--deps/v8/src/ic.h31
-rw-r--r--deps/v8/src/jsregexp.h1
-rw-r--r--deps/v8/src/jump-target-inl.h2
-rw-r--r--deps/v8/src/jump-target.cc13
-rw-r--r--deps/v8/src/jump-target.h1
-rw-r--r--deps/v8/src/liveedit-delay.js426
-rw-r--r--deps/v8/src/liveedit.cc404
-rw-r--r--deps/v8/src/liveedit.h28
-rw-r--r--deps/v8/src/log.cc34
-rw-r--r--deps/v8/src/log.h7
-rw-r--r--deps/v8/src/macros.py10
-rw-r--r--deps/v8/src/math.js8
-rw-r--r--deps/v8/src/messages.cc1
-rw-r--r--deps/v8/src/messages.js1
-rw-r--r--deps/v8/src/mips/codegen-mips.cc45
-rw-r--r--deps/v8/src/mips/codegen-mips.h14
-rw-r--r--deps/v8/src/mips/fast-codegen-mips.cc20
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc5
-rw-r--r--deps/v8/src/mips/ic-mips.cc25
-rw-r--r--deps/v8/src/mips/jump-target-mips.cc1
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc31
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h47
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc31
-rw-r--r--deps/v8/src/mips/virtual-frame-mips.cc12
-rw-r--r--deps/v8/src/mips/virtual-frame-mips.h19
-rw-r--r--deps/v8/src/number-info.h160
-rw-r--r--deps/v8/src/objects-debug.cc18
-rw-r--r--deps/v8/src/objects-inl.h39
-rw-r--r--deps/v8/src/objects.cc381
-rw-r--r--deps/v8/src/objects.h187
-rw-r--r--deps/v8/src/parser.cc43
-rw-r--r--deps/v8/src/platform-freebsd.cc6
-rw-r--r--deps/v8/src/platform-linux.cc10
-rw-r--r--deps/v8/src/platform-macos.cc5
-rw-r--r--deps/v8/src/platform-win32.cc5
-rw-r--r--deps/v8/src/prettyprinter.cc8
-rw-r--r--deps/v8/src/regexp-delay.js39
-rw-r--r--deps/v8/src/register-allocator-inl.h39
-rw-r--r--deps/v8/src/register-allocator.cc21
-rw-r--r--deps/v8/src/register-allocator.h21
-rw-r--r--deps/v8/src/rewriter.cc58
-rw-r--r--deps/v8/src/runtime.cc577
-rw-r--r--deps/v8/src/runtime.h14
-rw-r--r--deps/v8/src/runtime.js2
-rwxr-xr-xdeps/v8/src/scanner.cc105
-rw-r--r--deps/v8/src/scanner.h67
-rw-r--r--deps/v8/src/scopeinfo.cc4
-rw-r--r--deps/v8/src/scopeinfo.h1
-rw-r--r--deps/v8/src/scopes.cc23
-rw-r--r--deps/v8/src/serialize.cc42
-rw-r--r--deps/v8/src/spaces.cc7
-rw-r--r--deps/v8/src/splay-tree-inl.h276
-rw-r--r--deps/v8/src/splay-tree.h191
-rw-r--r--deps/v8/src/string.js12
-rw-r--r--deps/v8/src/top.cc1
-rw-r--r--deps/v8/src/usage-analyzer.cc426
-rw-r--r--deps/v8/src/utils.h60
-rw-r--r--deps/v8/src/v8-counters.h19
-rw-r--r--deps/v8/src/v8.h2
-rw-r--r--deps/v8/src/variables.cc58
-rw-r--r--deps/v8/src/variables.h38
-rw-r--r--deps/v8/src/version.cc4
-rw-r--r--deps/v8/src/virtual-frame-inl.h17
-rw-r--r--deps/v8/src/virtual-frame.cc18
-rw-r--r--deps/v8/src/x64/builtins-x64.cc4
-rw-r--r--deps/v8/src/x64/codegen-x64.cc412
-rw-r--r--deps/v8/src/x64/codegen-x64.h52
-rw-r--r--deps/v8/src/x64/fast-codegen-x64.cc1
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc1
-rw-r--r--deps/v8/src/x64/ic-x64.cc21
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc63
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h28
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc6
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc12
-rw-r--r--deps/v8/src/x64/virtual-frame-x64.cc41
-rw-r--r--deps/v8/src/x64/virtual-frame-x64.h14
-rw-r--r--deps/v8/src/zone-inl.h227
-rw-r--r--deps/v8/src/zone.cc1
-rw-r--r--deps/v8/src/zone.h96
-rw-r--r--deps/v8/test/cctest/SConscript1
-rw-r--r--deps/v8/test/cctest/cctest.status2
-rw-r--r--deps/v8/test/cctest/test-api.cc283
-rw-r--r--deps/v8/test/cctest/test-dataflow.cc103
-rw-r--r--deps/v8/test/cctest/test-debug.cc14
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc2
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc1
-rw-r--r--deps/v8/test/cctest/test-heap.cc370
-rw-r--r--deps/v8/test/cctest/test-log.cc69
-rw-r--r--deps/v8/test/cctest/test-serialize.cc28
-rw-r--r--deps/v8/test/mjsunit/array-elements-from-array-prototype-chain.js191
-rw-r--r--deps/v8/test/mjsunit/array-elements-from-array-prototype.js191
-rw-r--r--deps/v8/test/mjsunit/array-elements-from-object-prototype.js191
-rw-r--r--deps/v8/test/mjsunit/array-length.js22
-rw-r--r--deps/v8/test/mjsunit/array-slice.js11
-rw-r--r--deps/v8/test/mjsunit/array-splice.js59
-rw-r--r--deps/v8/test/mjsunit/array-unshift.js8
-rw-r--r--deps/v8/test/mjsunit/bugs/bug-618.js45
-rw-r--r--deps/v8/test/mjsunit/date.js14
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-1.js48
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-2.js70
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-check-stack.js84
-rw-r--r--deps/v8/test/mjsunit/debug-scopes.js20
-rw-r--r--deps/v8/test/mjsunit/debug-script.js2
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives.js14
-rw-r--r--deps/v8/test/mjsunit/math-sqrt.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-634.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-636.js (renamed from deps/v8/src/usage-analyzer.h)22
-rw-r--r--deps/v8/test/mjsunit/string-charat.js13
-rw-r--r--deps/v8/test/mjsunit/string-index.js14
-rw-r--r--deps/v8/test/mjsunit/string-split-cache.js40
-rw-r--r--deps/v8/test/mjsunit/undeletable-functions.js14
-rw-r--r--deps/v8/tools/gyp/v8.gyp8
-rw-r--r--deps/v8/tools/visual_studio/js2c.cmd2
-rw-r--r--deps/v8/tools/visual_studio/v8.vcproj4
-rw-r--r--deps/v8/tools/visual_studio/v8_arm.vcproj4
-rw-r--r--deps/v8/tools/visual_studio/v8_base.vcproj8
-rw-r--r--deps/v8/tools/visual_studio/v8_base_arm.vcproj8
-rw-r--r--deps/v8/tools/visual_studio/v8_base_x64.vcproj8
-rw-r--r--deps/v8/tools/visual_studio/v8_x64.vcproj4
194 files changed, 10315 insertions, 3224 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 339fd18f59..4363b19995 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,21 @@
+2010-03-10: Version 2.1.3
+
+ Added API method for context-disposal notifications.
+
+ Added API method for accessing elements by integer index.
+
+ Added missing implementation of Uint32::Value and Value::IsUint32
+ API methods.
+
+ Added IsExecutionTerminating API method.
+
+ Disabled strict aliasing for GCC 4.4.
+
+ Fixed string-concatenation bug (issue 636).
+
+ Performance improvements on all platforms.
+
+
2010-02-23: Version 2.1.2
Fix a crash bug caused by wrong assert.
@@ -6,6 +24,7 @@
Performance improvements on all platforms.
+
2010-02-19: Version 2.1.1
[ES5] Implemented Object.defineProperty.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 5483663fd6..f7638e0b2a 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -46,8 +46,8 @@ if ANDROID_TOP is None:
# on linux we need these compiler flags to avoid crashes in the v8 test suite
# and avoid dtoa.c strict aliasing issues
if os.environ.get('GCC_VERSION') == '44':
- GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
- GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing']
+ GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp', '-fno-strict-aliasing']
+ GCC_DTOA_EXTRA_CCFLAGS = []
else:
GCC_EXTRA_CCFLAGS = []
GCC_DTOA_EXTRA_CCFLAGS = []
@@ -255,8 +255,16 @@ LIBRARY_FLAGS = {
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
- 'LINKFLAGS': ['/LTCG'],
'ARFLAGS': ['/LTCG'],
+ 'pgo:off': {
+ 'LINKFLAGS': ['/LTCG'],
+ },
+ 'pgo:instrument': {
+ 'LINKFLAGS': ['/LTCG:PGI']
+ },
+ 'pgo:optimize': {
+ 'LINKFLAGS': ['/LTCG:PGO']
+ }
}
}
}
@@ -267,6 +275,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -526,7 +535,15 @@ SAMPLE_FLAGS = {
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
- 'LINKFLAGS': ['/LTCG'],
+ 'pgo:off': {
+ 'LINKFLAGS': ['/LTCG'],
+ },
+ },
+ 'pgo:instrument': {
+ 'LINKFLAGS': ['/LTCG:PGI']
+ },
+ 'pgo:optimize': {
+ 'LINKFLAGS': ['/LTCG:PGO']
}
},
'arch:ia32': {
@@ -710,6 +727,11 @@ SIMPLE_OPTIONS = {
'values': ['arm', 'thumb2', 'none'],
'default': 'none',
'help': 'generate thumb2 instructions instead of arm instructions (default)'
+ },
+ 'pgo': {
+ 'values': ['off', 'instrument', 'optimize'],
+ 'default': 'off',
+ 'help': 'select profile guided optimization variant',
}
}
@@ -797,6 +819,8 @@ def VerifyOptions(env):
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
Abort("Shared Object soname not applicable for static library.")
+ if env['os'] != 'win32' and env['pgo'] != 'off':
+ Abort("Profile guided optimization only supported on Windows.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
@@ -882,7 +906,7 @@ class BuildContext(object):
env['ENV'] = self.env_overrides
-def PostprocessOptions(options):
+def PostprocessOptions(options, os):
# Adjust architecture if the simulator option has been set
if (options['simulator'] != 'none') and (options['arch'] != options['simulator']):
if 'arch' in ARGUMENTS:
@@ -893,6 +917,10 @@ def PostprocessOptions(options):
# Print a warning if profiling is enabled without profiling support
print "Warning: forcing profilingsupport on when prof is on"
options['profilingsupport'] = 'on'
+ if os == 'win32' and options['pgo'] != 'off' and options['msvcltcg'] == 'off':
+ if 'msvcltcg' in ARGUMENTS:
+ print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
+ options['msvcltcg'] = 'on'
if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
@@ -923,7 +951,7 @@ def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
- PostprocessOptions(options)
+ PostprocessOptions(options, env['os'])
context = BuildContext(options, env_overrides, samples=SplitList(env['sample']))
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 69b93c69c0..882eeddf15 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -261,6 +261,10 @@ template <class T> class V8EXPORT_INLINE Handle {
return Handle<T>(T::Cast(*that));
}
+ template <class S> inline Handle<S> As() {
+ return Handle<S>::Cast(*this);
+ }
+
private:
T* val_;
};
@@ -295,6 +299,10 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
return Local<T>(T::Cast(*that));
}
+ template <class S> inline Local<S> As() {
+ return Local<S>::Cast(*this);
+ }
+
/** Create a local handle for the content of another handle.
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
@@ -368,6 +376,10 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
return Persistent<T>(T::Cast(*that));
}
+ template <class S> inline Persistent<S> As() {
+ return Persistent<S>::Cast(*this);
+ }
+
/**
* Creates a new persistent handle for an existing local or
* persistent handle.
@@ -538,13 +550,13 @@ class V8EXPORT Script {
* Compiles the specified script (context-independent).
*
* \param source Script source code.
- * \param origin Script origin, owned by caller, no references are kept
+ * \param origin Script origin, owned by caller, no references are kept
* when New() returns
* \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
* using pre_data speeds compilation if it's done multiple times.
* Owned by caller, no references are kept when New() returns.
* \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but allows data to be
+ * this has same effect as calling SetData(), but allows data to be
* available to compile event handlers.
* \return Compiled script object (context independent; when run it
* will use the currently entered context).
@@ -559,7 +571,7 @@ class V8EXPORT Script {
* object (typically a string) as the script's origin.
*
* \param source Script source code.
- * \patam file_name file name object (typically a string) to be used
+ * \param file_name file name object (typically a string) to be used
* as the script's origin.
* \return Compiled script object (context independent; when run it
* will use the currently entered context).
@@ -571,7 +583,7 @@ class V8EXPORT Script {
* Compiles the specified script (bound to current context).
*
* \param source Script source code.
- * \param origin Script origin, owned by caller, no references are kept
+ * \param origin Script origin, owned by caller, no references are kept
* when Compile() returns
* \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
* using pre_data speeds compilation if it's done multiple times.
@@ -755,6 +767,11 @@ class V8EXPORT Value : public Data {
bool IsInt32() const;
/**
+ * Returns true if this value is a 32-bit signed integer.
+ */
+ bool IsUint32() const;
+
+ /**
* Returns true if this value is a Date.
*/
bool IsDate() const;
@@ -1178,6 +1195,9 @@ class V8EXPORT Object : public Value {
Handle<Value> value,
PropertyAttribute attribs = None);
+ bool Set(uint32_t index,
+ Handle<Value> value);
+
// Sets a local property on this object bypassing interceptors and
// overriding accessors or read-only properties.
//
@@ -1192,6 +1212,8 @@ class V8EXPORT Object : public Value {
Local<Value> Get(Handle<Value> key);
+ Local<Value> Get(uint32_t index);
+
// TODO(1245389): Replace the type-specific versions of these
// functions with generic ones that accept a Handle<Value> key.
bool Has(Handle<String> key);
@@ -2485,9 +2507,11 @@ class V8EXPORT V8 {
/**
* Optional notification that a context has been disposed. V8 uses
- * these notifications to guide the garbage collection heuristic.
+ * these notifications to guide the GC heuristic. Returns the number
+ * of context disposals - including this one - since the last time
+ * V8 had a chance to clean up.
*/
- static void ContextDisposedNotification();
+ static int ContextDisposedNotification();
private:
V8();
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 73de193ad5..d61da3e078 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -97,7 +97,6 @@ SOURCES = {
token.cc
top.cc
unicode.cc
- usage-analyzer.cc
utils.cc
v8-counters.cc
v8.cc
@@ -249,6 +248,7 @@ math.js
messages.js
apinatives.js
debug-delay.js
+liveedit-delay.js
mirror-delay.js
date-delay.js
regexp-delay.js
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index b05719edb5..e41db94730 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -32,7 +32,6 @@
#include "factory.h"
#include "scopeinfo.h"
#include "top.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 22d2f4bc78..93fce79bdf 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -34,9 +34,11 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
+#include "messages.h"
#include "platform.h"
#include "serialize.h"
#include "snapshot.h"
+#include "top.h"
#include "utils.h"
#include "v8threads.h"
#include "version.h"
@@ -1569,6 +1571,18 @@ bool Value::IsInt32() const {
}
+bool Value::IsUint32() const {
+ if (IsDeadCheck("v8::Value::IsUint32()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
+ if (obj->IsNumber()) {
+ double value = obj->Number();
+ return i::FastUI2D(i::FastD2UI(value)) == value;
+ }
+ return false;
+}
+
+
bool Value::IsDate() const {
if (IsDeadCheck("v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -1974,6 +1988,23 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
}
+bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
+ ON_BAILOUT("v8::Object::Set()", return false);
+ ENTER_V8;
+ HandleScope scope;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> obj = i::SetElement(
+ self,
+ index,
+ value_obj);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(false);
+ return true;
+}
+
+
bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
@@ -2022,6 +2053,18 @@ Local<Value> v8::Object::Get(v8::Handle<Value> key) {
}
+Local<Value> v8::Object::Get(uint32_t index) {
+ ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
+ ENTER_V8;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result = i::GetElement(self, index);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ return Utils::ToLocal(result);
+}
+
+
Local<Value> v8::Object::GetPrototype() {
ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
ENTER_V8;
@@ -2614,7 +2657,7 @@ int String::WriteAscii(char* buffer, int start, int length) const {
StringTracker::RecordWrite(str);
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
- str->TryFlattenIfNotFlat();
+ str->TryFlatten();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
@@ -2727,6 +2770,17 @@ int32_t Int32::Value() const {
}
+uint32_t Uint32::Value() const {
+ if (IsDeadCheck("v8::Uint32::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ return static_cast<uint32_t>(obj->Number());
+ }
+}
+
+
int v8::Object::InternalFieldCount() {
if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
@@ -2820,9 +2874,9 @@ void v8::V8::LowMemoryNotification() {
}
-void v8::V8::ContextDisposedNotification() {
- if (!i::V8::IsRunning()) return;
- i::Heap::NotifyContextDisposed();
+int v8::V8::ContextDisposedNotification() {
+ if (!i::V8::IsRunning()) return 0;
+ return i::Heap::NotifyContextDisposed();
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index c79aac6569..6b226fd3df 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -81,100 +81,6 @@ void CpuFeatures::Probe() {
// -----------------------------------------------------------------------------
-// Implementation of Register and CRegister
-
-Register no_reg = { -1 };
-
-Register r0 = { 0 };
-Register r1 = { 1 };
-Register r2 = { 2 };
-Register r3 = { 3 };
-Register r4 = { 4 };
-Register r5 = { 5 };
-Register r6 = { 6 };
-Register r7 = { 7 };
-Register r8 = { 8 }; // Used as context register.
-Register r9 = { 9 };
-Register r10 = { 10 }; // Used as roots register.
-Register fp = { 11 };
-Register ip = { 12 };
-Register sp = { 13 };
-Register lr = { 14 };
-Register pc = { 15 };
-
-
-CRegister no_creg = { -1 };
-
-CRegister cr0 = { 0 };
-CRegister cr1 = { 1 };
-CRegister cr2 = { 2 };
-CRegister cr3 = { 3 };
-CRegister cr4 = { 4 };
-CRegister cr5 = { 5 };
-CRegister cr6 = { 6 };
-CRegister cr7 = { 7 };
-CRegister cr8 = { 8 };
-CRegister cr9 = { 9 };
-CRegister cr10 = { 10 };
-CRegister cr11 = { 11 };
-CRegister cr12 = { 12 };
-CRegister cr13 = { 13 };
-CRegister cr14 = { 14 };
-CRegister cr15 = { 15 };
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0 = { 0 };
-SwVfpRegister s1 = { 1 };
-SwVfpRegister s2 = { 2 };
-SwVfpRegister s3 = { 3 };
-SwVfpRegister s4 = { 4 };
-SwVfpRegister s5 = { 5 };
-SwVfpRegister s6 = { 6 };
-SwVfpRegister s7 = { 7 };
-SwVfpRegister s8 = { 8 };
-SwVfpRegister s9 = { 9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0 = { 0 };
-DwVfpRegister d1 = { 1 };
-DwVfpRegister d2 = { 2 };
-DwVfpRegister d3 = { 3 };
-DwVfpRegister d4 = { 4 };
-DwVfpRegister d5 = { 5 };
-DwVfpRegister d6 = { 6 };
-DwVfpRegister d7 = { 7 };
-DwVfpRegister d8 = { 8 };
-DwVfpRegister d9 = { 9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
-
-// -----------------------------------------------------------------------------
// Implementation of RelocInfo
const int RelocInfo::kApplyMask = 0;
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index f6b7a06aa2..c972c57b5d 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -84,25 +84,24 @@ struct Register {
int code_;
};
-
-extern Register no_reg;
-extern Register r0;
-extern Register r1;
-extern Register r2;
-extern Register r3;
-extern Register r4;
-extern Register r5;
-extern Register r6;
-extern Register r7;
-extern Register r8;
-extern Register r9;
-extern Register r10;
-extern Register fp;
-extern Register ip;
-extern Register sp;
-extern Register lr;
-extern Register pc;
-
+const Register no_reg = { -1 };
+
+const Register r0 = { 0 };
+const Register r1 = { 1 };
+const Register r2 = { 2 };
+const Register r3 = { 3 };
+const Register r4 = { 4 };
+const Register r5 = { 5 };
+const Register r6 = { 6 };
+const Register r7 = { 7 };
+const Register r8 = { 8 }; // Used as context register.
+const Register r9 = { 9 };
+const Register r10 = { 10 }; // Used as roots register.
+const Register fp = { 11 };
+const Register ip = { 12 };
+const Register sp = { 13 };
+const Register lr = { 14 };
+const Register pc = { 15 };
// Single word VFP register.
struct SwVfpRegister {
@@ -139,57 +138,57 @@ struct DwVfpRegister {
};
-// Support for VFP registers s0 to s31 (d0 to d15).
+// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
-
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
+const SwVfpRegister s0 = { 0 };
+const SwVfpRegister s1 = { 1 };
+const SwVfpRegister s2 = { 2 };
+const SwVfpRegister s3 = { 3 };
+const SwVfpRegister s4 = { 4 };
+const SwVfpRegister s5 = { 5 };
+const SwVfpRegister s6 = { 6 };
+const SwVfpRegister s7 = { 7 };
+const SwVfpRegister s8 = { 8 };
+const SwVfpRegister s9 = { 9 };
+const SwVfpRegister s10 = { 10 };
+const SwVfpRegister s11 = { 11 };
+const SwVfpRegister s12 = { 12 };
+const SwVfpRegister s13 = { 13 };
+const SwVfpRegister s14 = { 14 };
+const SwVfpRegister s15 = { 15 };
+const SwVfpRegister s16 = { 16 };
+const SwVfpRegister s17 = { 17 };
+const SwVfpRegister s18 = { 18 };
+const SwVfpRegister s19 = { 19 };
+const SwVfpRegister s20 = { 20 };
+const SwVfpRegister s21 = { 21 };
+const SwVfpRegister s22 = { 22 };
+const SwVfpRegister s23 = { 23 };
+const SwVfpRegister s24 = { 24 };
+const SwVfpRegister s25 = { 25 };
+const SwVfpRegister s26 = { 26 };
+const SwVfpRegister s27 = { 27 };
+const SwVfpRegister s28 = { 28 };
+const SwVfpRegister s29 = { 29 };
+const SwVfpRegister s30 = { 30 };
+const SwVfpRegister s31 = { 31 };
+
+const DwVfpRegister d0 = { 0 };
+const DwVfpRegister d1 = { 1 };
+const DwVfpRegister d2 = { 2 };
+const DwVfpRegister d3 = { 3 };
+const DwVfpRegister d4 = { 4 };
+const DwVfpRegister d5 = { 5 };
+const DwVfpRegister d6 = { 6 };
+const DwVfpRegister d7 = { 7 };
+const DwVfpRegister d8 = { 8 };
+const DwVfpRegister d9 = { 9 };
+const DwVfpRegister d10 = { 10 };
+const DwVfpRegister d11 = { 11 };
+const DwVfpRegister d12 = { 12 };
+const DwVfpRegister d13 = { 13 };
+const DwVfpRegister d14 = { 14 };
+const DwVfpRegister d15 = { 15 };
// Coprocessor register
@@ -210,23 +209,24 @@ struct CRegister {
};
-extern CRegister no_creg;
-extern CRegister cr0;
-extern CRegister cr1;
-extern CRegister cr2;
-extern CRegister cr3;
-extern CRegister cr4;
-extern CRegister cr5;
-extern CRegister cr6;
-extern CRegister cr7;
-extern CRegister cr8;
-extern CRegister cr9;
-extern CRegister cr10;
-extern CRegister cr11;
-extern CRegister cr12;
-extern CRegister cr13;
-extern CRegister cr14;
-extern CRegister cr15;
+const CRegister no_creg = { -1 };
+
+const CRegister cr0 = { 0 };
+const CRegister cr1 = { 1 };
+const CRegister cr2 = { 2 };
+const CRegister cr3 = { 3 };
+const CRegister cr4 = { 4 };
+const CRegister cr5 = { 5 };
+const CRegister cr6 = { 6 };
+const CRegister cr7 = { 7 };
+const CRegister cr8 = { 8 };
+const CRegister cr9 = { 9 };
+const CRegister cr10 = { 10 };
+const CRegister cr11 = { 11 };
+const CRegister cr12 = { 12 };
+const CRegister cr13 = { 13 };
+const CRegister cr14 = { 14 };
+const CRegister cr15 = { 15 };
// Coprocessor number
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index edb1b0ae7c..91e896d566 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -61,10 +61,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToRuntime expects r0 to contain the number of arguments
+ // JumpToExternalReference expects r0 to contain the number of arguments
// including the receiver and the extra arguments.
__ add(r0, r0, Operand(num_extra_args + 1));
- __ JumpToRuntime(ExternalReference(id));
+ __ JumpToExternalReference(ExternalReference(id));
}
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 6644d02487..9e59582593 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
+#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
@@ -142,6 +143,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(info->function());
+ Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
// Initialize state.
info_ = info;
@@ -3321,6 +3323,25 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
+// Generates the Math.pow method - currently just calls runtime.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ Load(args->at(0));
+ Load(args->at(1));
+ frame_->CallRuntime(Runtime::kMath_pow, 2);
+ frame_->EmitPush(r0);
+}
+
+
+// Generates the Math.sqrt method - currently just calls runtime.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ frame_->CallRuntime(Runtime::kMath_sqrt, 1);
+ frame_->EmitPush(r0);
+}
+
+
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
@@ -3404,6 +3425,44 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateCharFromCode");
+ ASSERT(args->length() == 1);
+
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+
+ JumpTarget slow_case;
+ JumpTarget exit;
+
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ tst(r0, Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ slow_case.Branch(nz);
+
+ ASSERT(kSmiTag == 0);
+ __ mov(r1, Operand(Factory::single_character_string_cache()));
+ __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r1, ip);
+ slow_case.Branch(eq);
+
+ frame_->EmitPush(r1);
+ exit.Jump();
+
+ slow_case.Bind();
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kCharFromCode, 1);
+ frame_->EmitPush(r0);
+
+ exit.Bind();
+}
+
+
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
@@ -3625,6 +3684,24 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+ frame_->CallRuntime(Runtime::kMath_sin, 1);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+ frame_->CallRuntime(Runtime::kMath_cos, 1);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -4489,7 +4566,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ bind(&gc);
__ push(cp);
__ push(r3);
- __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
}
@@ -4539,7 +4616,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
@@ -4601,8 +4678,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&slow_case);
- ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
- __ TailCallRuntime(runtime, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
@@ -6170,12 +6246,17 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ return Handle<Code>::null();
+}
+
+
void StackCheckStub::Generate(MacroAssembler* masm) {
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
- __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
__ StubReturn(1);
}
@@ -6784,7 +6865,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
@@ -6887,7 +6968,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -7178,6 +7259,170 @@ void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
}
+void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ sub(scratch, c1, Operand(static_cast<int>('0')));
+ __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+ __ b(hi, &not_array_index);
+ __ sub(scratch, c2, Operand(static_cast<int>('0')));
+ __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+
+ // If check failed combine both characters into single halfword.
+ // This is required by the contract of the method: code at the
+ // not_found branch expects this combination in c1 register
+ __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
+ __ b(ls, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1);
+ GenerateHashAddCharacter(masm, hash, c2);
+ GenerateHashGetHash(masm, hash);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load symbol table
+ // Load address of first element of the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ // Load undefined value
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ mov(mask, Operand(mask, ASR, 1));
+ __ sub(mask, mask, Operand(1));
+
+ // Calculate untagged address of the first element of the symbol table.
+ Register first_symbol_table_element = symbol_table;
+ __ add(first_symbol_table_element, symbol_table,
+ Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // mask: capacity mask
+ // first_symbol_table_element: address of the first element of
+ // the symbol table
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ Register candidate = scratch5; // Scratch register contains candidate.
+
+ // Calculate entry in symbol table.
+ if (i > 0) {
+ __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ } else {
+ __ mov(candidate, hash);
+ }
+
+ __ and_(candidate, candidate, Operand(mask));
+
+ // Load the entry from the symbol table.
+ ASSERT_EQ(1, SymbolTable::kEntrySize);
+ __ ldr(candidate,
+ MemOperand(first_symbol_table_element,
+ candidate,
+ LSL,
+ kPointerSizeLog2));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, undefined);
+ __ b(eq, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+ __ cmp(scratch, Operand(2));
+ __ b(ne, &next_probe[i]);
+
+ // Check that the candidate is a non-external ascii string.
+ __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
+ &next_probe[i]);
+
+ // Check if the two characters match.
+ // Assumes that word load is little endian.
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ cmp(chars, scratch);
+ __ b(eq, &found_in_symbol_table);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ if (!result.is(r0)) {
+ __ mov(r0, result);
+ }
+}
+
+
+void StringStubBase::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash = character + (character << 10);
+ __ add(hash, character, Operand(character, LSL, 10));
+ // hash ^= hash >> 6;
+ __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash += character;
+ __ add(hash, hash, Operand(character));
+ // hash += hash << 10;
+ __ add(hash, hash, Operand(hash, LSL, 10));
+ // hash ^= hash >> 6;
+ __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ // hash += hash << 3;
+ __ add(hash, hash, Operand(hash, LSL, 3));
+ // hash ^= hash >> 11;
+ __ eor(hash, hash, Operand(hash, ASR, 11));
+ // hash += hash << 15;
+ __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+
+ // if (hash == 0) hash = 27;
+ __ mov(hash, Operand(27), LeaveCC, nz);
+}
+
+
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -7213,11 +7458,14 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ sub(r2, r2, Operand(r3), SetCC);
__ b(mi, &runtime); // Fail if from > to.
- // Handle sub-strings of length 2 and less in the runtime system.
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
__ cmp(r2, Operand(2));
- __ b(le, &runtime);
+ __ b(lt, &runtime);
// r2: length
+ // r3: from index (untagged smi)
// r6: from (smi)
// r7: to (smi)
@@ -7231,6 +7479,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: instance type
// r2: length
+ // r3: from index (untagged smi)
// r5: string
// r6: from (smi)
// r7: to (smi)
@@ -7257,6 +7506,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: instance type.
// r2: length
+ // r3: from index (untagged smi)
// r5: string
// r6: from (smi)
// r7: to (smi)
@@ -7266,6 +7516,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: instance type.
// r2: result string length.
+ // r3: from index (untagged smi)
// r5: string.
// r6: from offset (smi)
// Check for flat ascii string.
@@ -7274,6 +7525,35 @@ void SubStringStub::Generate(MacroAssembler* masm) {
ASSERT_EQ(0, kTwoByteStringTag);
__ b(eq, &non_ascii_flat);
+ Label result_longer_than_two;
+ __ cmp(r2, Operand(2));
+ __ b(gt, &result_longer_than_two);
+
+ // Sub string of length 2 requested.
+ // Get the two characters forming the sub string.
+ __ add(r5, r5, Operand(r3));
+ __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
+ __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ GenerateTwoCharacterSymbolTableProbe(masm, r3, r4, r1, r5, r6, r7, r9,
+ &make_two_character_string);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // r2: result string length.
+ // r3: two characters combined into halfword in little endian byte order.
+ __ bind(&make_two_character_string);
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
+ __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&result_longer_than_two);
+
// Allocate the result.
__ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
@@ -7331,7 +7611,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
@@ -7422,7 +7702,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -7482,14 +7762,52 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r4: first string instance type (if string_check_)
// r5: second string instance type (if string_check_)
// Look at the length of the result of adding the two strings.
- Label string_add_flat_result;
+ Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
ASSERT(String::kMaxLength * 2 > String::kMaxLength);
__ add(r6, r2, Operand(r3));
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
__ cmp(r6, Operand(2));
- __ b(eq, &string_add_runtime);
+ __ b(ne, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ &string_add_runtime);
+
+ // Get the two characters forming the sub string.
+ __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string;
+ GenerateTwoCharacterSymbolTableProbe(masm, r2, r3, r6, r7, r4, r5, r9,
+ &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&make_two_character_string);
+ // Resulting string has length 2 and first chars of two strings
+ // are combined into single halfword in r2 register.
+ // So we can fill resulting string without two loops by a single
+ // halfword store instruction (which assumes that processor is
+ // in a little endian mode)
+ __ mov(r6, Operand(2));
+ __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
+ __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&longer_than_two);
// Check if resulting string will be flat.
__ cmp(r6, Operand(String::kMinNonFlatLength));
__ b(lt, &string_add_flat_result);
@@ -7568,6 +7886,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Both strings are sequential ASCII strings. We also know that they are
// short (since the sum of the lengths is less than kMinNonFlatLength).
+ // r6: length of resulting flat string
__ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
// Locate first character of result.
__ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -7636,7 +7955,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
}
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 2bc482ed5e..bea98b6693 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -370,6 +370,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateCharFromCode(ZoneList<Expression*>* args);
+
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -393,6 +396,16 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast support for Math.pow().
+ void GenerateMathPow(ZoneList<Expression*>* args);
+
+ // Fast call to sine function.
+ void GenerateMathSin(ZoneList<Expression*>* args);
+ void GenerateMathCos(ZoneList<Expression*>* args);
+
+ // Fast support for Math.pow().
+ void GenerateMathSqrt(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -554,6 +567,36 @@ class StringStubBase: public CodeStub {
Register scratch4,
Register scratch5,
int flags);
+
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found the code falls through with the string in register r0.
+ // Contents of both c1 and c2 registers are modified. At the exit c1 is
+ // guaranteed to contain halfword with low and high bytes equal to
+ // initial contents of c1 and c2 respectively.
+ void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
};
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 0d322d1a3e..5dedc29ab9 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -40,6 +40,7 @@ Register FastCodeGenerator::accumulator0() { return r0; }
Register FastCodeGenerator::accumulator1() { return r1; }
Register FastCodeGenerator::scratch0() { return r3; }
Register FastCodeGenerator::scratch1() { return r4; }
+Register FastCodeGenerator::scratch2() { return r5; }
Register FastCodeGenerator::receiver_reg() { return r2; }
Register FastCodeGenerator::context_reg() { return cp; }
@@ -100,7 +101,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
if (needs_write_barrier) {
__ mov(scratch1(), Operand(offset));
- __ RecordWrite(scratch0(), scratch1(), ip);
+ __ RecordWrite(scratch0(), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
@@ -180,6 +181,7 @@ void FastCodeGenerator::EmitBitOr() {
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
+ Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index c37e29f63c..230818f5d1 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -57,6 +57,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) {
int locals_count = scope()->num_stack_slots();
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 7ddb3386e6..e68a77a0fc 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -494,7 +494,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ stm(db_w, sp, r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -531,7 +532,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
- __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -545,7 +547,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
- __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
@@ -662,7 +664,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(r0); // key
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(
+ __ TailCallExternalReference(ExternalReference(
IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
__ bind(&slow);
@@ -681,7 +683,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
- __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -695,7 +698,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
@@ -854,7 +857,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ stm(db_w, sp, r2.bit() | r0.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -897,7 +901,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(receiver);
__ push(value);
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ __ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index b249d696d3..36bebdfeb4 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -220,7 +220,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// remembered set bits in the new space.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
- and_(scratch, object, Operand(Heap::NewSpaceMask()));
+ and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
b(eq, &done);
@@ -1234,19 +1234,26 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- JumpToRuntime(ext);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
-void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@@ -1410,15 +1417,12 @@ void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
- and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
- b(ne, failure);
+
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
}
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
@@ -1439,6 +1443,36 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
}
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ and_(scratch1, first, Operand(kFlatAsciiStringMask));
+ and_(scratch2, second, Operand(kFlatAsciiStringMask));
+ cmp(scratch1, Operand(kFlatAsciiStringTag));
+ // Ignore second test if first test failed.
+ cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+ b(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ and_(scratch, type, Operand(kFlatAsciiStringMask));
+ cmp(scratch, Operand(kFlatAsciiStringTag));
+ b(ne, failure);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 98cea16389..5d9e51304a 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -333,7 +333,6 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- // Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@@ -344,14 +343,19 @@ class MacroAssembler: public Assembler {
int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToRuntime, but also takes care of passing the number
+ // Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
- void TailCallRuntime(const ExternalReference& ext,
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Jump to a runtime routine.
- void JumpToRuntime(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -421,6 +425,22 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* not_flat_ascii_strings);
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 9dd3b93266..f621be47be 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -765,7 +765,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label grow_failed;
// Call GrowStack(backtrack_stackpointer())
- int num_arguments = 2;
+ static const int num_arguments = 2;
FrameAlign(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
@@ -966,7 +966,7 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- int num_arguments = 3;
+ static const int num_arguments = 3;
FrameAlign(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index da73942150..5d5b2a5d9a 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -297,7 +297,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(receiver_reg);
__ mov(r2, Operand(Handle<Map>(transition)));
__ stm(db_w, sp, r2.bit() | r0.bit());
- __ TailCallRuntime(
+ __ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
3, 1);
return;
@@ -529,7 +529,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(ref, 5, 1);
+ __ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@@ -549,7 +549,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(ref, 5, 1);
+ __ TailCallExternalReference(ref, 5, 1);
}
private:
@@ -719,7 +719,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5, 1);
+ __ TailCallExternalReference(load_callback_property, 5, 1);
return true;
}
@@ -1204,7 +1204,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
- __ TailCallRuntime(store_callback_property, 4, 1);
+ __ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1251,7 +1251,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallRuntime(store_ic_property, 3, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 6e1a47fb6e..ab6e5f8136 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -35,27 +35,8 @@
namespace v8 {
namespace internal {
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
#define __ ACCESS_MASM(masm())
-
-// On entry to a function, the virtual frame already contains the
-// receiver and the parameters. All initial frame elements are in
-// memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count()) { // 0-based index of TOS.
- for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
- }
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- register_locations_[i] = kIllegalIndex;
- }
-}
-
-
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
}
@@ -314,7 +295,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
stack_pointer_++;
__ push(reg);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index f69bddf55f..6eb08119ed 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -59,7 +59,7 @@ class VirtualFrame : public ZoneObject {
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- VirtualFrame();
+ inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
@@ -69,7 +69,7 @@ class VirtualFrame : public ZoneObject {
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -344,7 +344,7 @@ class VirtualFrame : public ZoneObject {
void EmitPushMultiple(int count, int src_regs);
// Push an element on the virtual frame.
- inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
+ inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index c28a662982..e33c2809ce 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -1149,6 +1149,8 @@ function SetupArray() {
ArrayReduce: 1,
ArrayReduceRight: 1
});
+
+ %FinishArrayPrototypeSetup($Array.prototype);
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 96d516f18e..aaf10efe8b 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -579,6 +579,11 @@ ExternalReference ExternalReference::random_positive_smi_function() {
}
+ExternalReference ExternalReference::transcendental_cache_array_address() {
+ return ExternalReference(TranscendentalCache::cache_array_address());
+}
+
+
ExternalReference ExternalReference::keyed_lookup_cache_keys() {
return ExternalReference(KeyedLookupCache::keys_address());
}
@@ -619,6 +624,11 @@ ExternalReference ExternalReference::new_space_start() {
}
+ExternalReference ExternalReference::new_space_mask() {
+ return ExternalReference(reinterpret_cast<Address>(Heap::NewSpaceMask()));
+}
+
+
ExternalReference ExternalReference::new_space_allocation_top_address() {
return ExternalReference(Heap::NewSpaceAllocationTopAddress());
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index f4013061eb..cde7d69247 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -37,7 +37,6 @@
#include "runtime.h"
#include "top.h"
-#include "zone-inl.h"
#include "token.h"
namespace v8 {
@@ -400,6 +399,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference perform_gc_function();
static ExternalReference random_positive_smi_function();
+ static ExternalReference transcendental_cache_array_address();
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys();
@@ -427,6 +427,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
+ static ExternalReference new_space_mask();
static ExternalReference heap_always_allocate_scope_depth();
// Used for fast allocation in generated code.
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 7cb5578317..062a5c67ad 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -67,8 +67,6 @@ VariableProxy::VariableProxy(Handle<String> name,
inside_with_(inside_with) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
- // at least one access, otherwise no need for a VariableProxy
- var_uses_.RecordRead(1);
}
@@ -87,8 +85,7 @@ void VariableProxy::BindTo(Variable* var) {
// eval() etc. Const-ness and variable declarations are a complete mess
// in JS. Sigh...
var_ = var;
- var->var_uses()->RecordUses(&var_uses_);
- var->obj_uses()->RecordUses(&obj_uses_);
+ var->set_is_used(true);
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index f2171cc3ef..13502dc2a8 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -117,6 +117,9 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
class AstNode: public ZoneObject {
public:
+ static const int kNoNumber = -1;
+
+ AstNode() : num_(kNoNumber) {}
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@@ -141,6 +144,13 @@ class AstNode: public ZoneObject {
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
virtual CompareOperation* AsCompareOperation() { return NULL; }
+
+ int num() { return num_; }
+ void set_num(int n) { num_ = n; }
+
+ private:
+ // Support for ast node numbering.
+ int num_;
};
@@ -181,9 +191,10 @@ class Expression: public AstNode {
kTestValue
};
- static const int kNoLabel = -1;
-
- Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {}
+ Expression()
+ : bitfields_(0),
+ def_(NULL),
+ defined_vars_(NULL) {}
virtual Expression* AsExpression() { return this; }
@@ -211,11 +222,6 @@ class Expression: public AstNode {
// Static type information for this expression.
StaticType* type() { return &type_; }
- int num() { return num_; }
-
- // AST node numbering ordered by evaluation order.
- void set_num(int n) { num_ = n; }
-
// Data flow information.
DefinitionInfo* var_def() { return def_; }
void set_var_def(DefinitionInfo* def) { def_ = def; }
@@ -225,11 +231,36 @@ class Expression: public AstNode {
defined_vars_ = defined_vars;
}
+ // AST analysis results
+
+ // True if the expression rooted at this node can be compiled by the
+ // side-effect free compiler.
+ bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
+ void set_side_effect_free(bool is_side_effect_free) {
+ bitfields_ &= ~SideEffectFreeField::mask();
+ bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
+ }
+
+ // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
+ // be applied to the value of this expression?
+ // If so, we may be able to optimize the calculation of the value.
+ bool to_int32() { return ToInt32Field::decode(bitfields_); }
+ void set_to_int32(bool to_int32) {
+ bitfields_ &= ~ToInt32Field::mask();
+ bitfields_ |= ToInt32Field::encode(to_int32);
+ }
+
+
private:
+ uint32_t bitfields_;
StaticType type_;
- int num_;
+
DefinitionInfo* def_;
ZoneList<DefinitionInfo*>* defined_vars_;
+
+ // Using template BitField<type, start, size>.
+ class SideEffectFreeField : public BitField<bool, 0, 1> {};
+ class ToInt32Field : public BitField<bool, 1, 1> {};
};
@@ -931,6 +962,10 @@ class VariableProxy: public Expression {
return var()->is_global() || var()->rewrite()->IsLeaf();
}
+ // Reading from a mutable variable is a side effect, but 'this' is
+ // immutable.
+ virtual bool IsTrivial() { return is_this(); }
+
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@@ -942,8 +977,6 @@ class VariableProxy: public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
- UseCount* var_uses() { return &var_uses_; }
- UseCount* obj_uses() { return &obj_uses_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
@@ -956,10 +989,6 @@ class VariableProxy: public Expression {
bool is_this_;
bool inside_with_;
- // VariableProxy usage info.
- UseCount var_uses_; // uses of the variable value
- UseCount obj_uses_; // uses of the object the variable points to
-
VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);
@@ -1018,6 +1047,8 @@ class Slot: public Expression {
virtual bool IsLeaf() { return true; }
+ bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
+
// Accessors
Variable* var() const { return var_; }
Type type() const { return type_; }
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index a7cf421b5b..12efbc17ab 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1050,6 +1050,19 @@ bool Genesis::InstallNatives() {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
global_context()->set_empty_script(*script);
}
+ {
+ // Builtin function for OpaqueReference -- a JSValue-based object,
+ // that keeps its field isolated from JavaScript code. It may store
+ // objects, that JavaScript code may not access.
+ Handle<JSFunction> opaque_reference_fun =
+ InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
+ JSValue::kSize, Top::initial_object_prototype(),
+ Builtins::Illegal, false);
+ Handle<JSObject> prototype =
+ Factory::NewJSObject(Top::object_function(), TENURED);
+ SetPrototype(opaque_reference_fun, prototype);
+ global_context()->set_opaque_reference_function(*opaque_reference_fun);
+ }
if (FLAG_natives_file == NULL) {
// Without natives file, install default natives.
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 8e88c28695..a8ba818c41 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -242,6 +242,109 @@ BUILTIN(ArrayCodeGeneric) {
}
+static Object* AllocateJSArray() {
+ JSFunction* array_function =
+ Top::context()->global_context()->array_function();
+ Object* result = Heap::AllocateJSObject(array_function);
+ if (result->IsFailure()) return result;
+ return result;
+}
+
+
+static Object* AllocateEmptyJSArray() {
+ Object* result = AllocateJSArray();
+ if (result->IsFailure()) return result;
+ JSArray* result_array = JSArray::cast(result);
+ result_array->set_length(Smi::FromInt(0));
+ result_array->set_elements(Heap::empty_fixed_array());
+ return result_array;
+}
+
+
+static void CopyElements(AssertNoAllocation* no_gc,
+ FixedArray* dst,
+ int dst_index,
+ FixedArray* src,
+ int src_index,
+ int len) {
+ ASSERT(dst != src); // Use MoveElements instead.
+ memcpy(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len * kPointerSize);
+ WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
+ if (mode == UPDATE_WRITE_BARRIER) {
+ Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+ }
+}
+
+
+static void MoveElements(AssertNoAllocation* no_gc,
+ FixedArray* dst,
+ int dst_index,
+ FixedArray* src,
+ int src_index,
+ int len) {
+ memmove(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len * kPointerSize);
+ WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
+ if (mode == UPDATE_WRITE_BARRIER) {
+ Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+ }
+}
+
+
+static void FillWithHoles(FixedArray* dst, int from, int to) {
+ MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
+}
+
+
+static bool ArrayPrototypeHasNoElements() {
+ // This method depends on non writability of Object and Array prototype
+ // fields.
+ Context* global_context = Top::context()->global_context();
+ // Array.prototype
+ JSObject* proto =
+ JSObject::cast(global_context->array_function()->prototype());
+ if (proto->elements() != Heap::empty_fixed_array()) return false;
+ // Hidden prototype
+ proto = JSObject::cast(proto->GetPrototype());
+ ASSERT(proto->elements() == Heap::empty_fixed_array());
+ // Object.prototype
+ proto = JSObject::cast(proto->GetPrototype());
+ if (proto != global_context->initial_object_prototype()) return false;
+ if (proto->elements() != Heap::empty_fixed_array()) return false;
+ ASSERT(proto->GetPrototype()->IsNull());
+ return true;
+}
+
+
+static Object* CallJsBuiltin(const char* name,
+ BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
+ HandleScope handleScope;
+
+ Handle<Object> js_builtin =
+ GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
+ name);
+ ASSERT(js_builtin->IsJSFunction());
+ Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
+ Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
+ int n_args = args.length() - 1;
+ for (int i = 0; i < n_args; i++) {
+ argv[i] = &args[i + 1];
+ }
+ bool pending_exception = false;
+ Handle<Object> result = Execution::Call(function,
+ args.receiver(),
+ n_args,
+ argv.start(),
+ &pending_exception);
+ argv.Dispose();
+ if (pending_exception) return Failure::Exception();
+ return *result;
+}
+
+
BUILTIN(ArrayPush) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@@ -261,22 +364,21 @@ BUILTIN(ArrayPush) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
if (obj->IsFailure()) return obj;
+ FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
- FixedArray* new_elms = FixedArray::cast(obj);
- WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
- // Fill out the new array with old elements.
- for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
+ CopyElements(&no_gc, new_elms, 0, elms, 0, len);
+ FillWithHoles(new_elms, new_length, capacity);
+
elms = new_elms;
array->set_elements(elms);
}
+ // Add the provided values.
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-
- // Add the provided values.
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
}
@@ -290,10 +392,9 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
- Object* undefined = Heap::undefined_value();
int len = Smi::cast(array->length())->value();
- if (len == 0) return undefined;
+ if (len == 0) return Heap::undefined_value();
// Get top element
FixedArray* elms = FixedArray::cast(array->elements());
@@ -318,41 +419,28 @@ BUILTIN(ArrayPop) {
}
-static Object* GetElementToMove(uint32_t index,
- FixedArray* elms,
- JSObject* prototype) {
- Object* e = elms->get(index);
- if (e->IsTheHole() && prototype->HasElement(index)) {
- e = prototype->GetElement(index);
+BUILTIN(ArrayShift) {
+ if (!ArrayPrototypeHasNoElements()) {
+ return CallJsBuiltin("ArrayShift", args);
}
- return e;
-}
-
-BUILTIN(ArrayShift) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
- // Fetch the prototype.
- JSFunction* array_function =
- Top::context()->global_context()->array_function();
- JSObject* prototype = JSObject::cast(array_function->prototype());
-
FixedArray* elms = FixedArray::cast(array->elements());
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
- first = prototype->GetElement(0);
+ first = Heap::undefined_value();
}
// Shift the elements.
- for (int i = 0; i < len - 1; i++) {
- elms->set(i, GetElementToMove(i + 1, elms, prototype));
- }
+ AssertNoAllocation no_gc;
+ MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
elms->set(len - 1, Heap::the_hole_value());
// Set the length.
@@ -363,6 +451,10 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
+ if (!ArrayPrototypeHasNoElements()) {
+ return CallJsBuiltin("ArrayUnshift", args);
+ }
+
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@@ -379,38 +471,22 @@ BUILTIN(ArrayUnshift) {
FixedArray* elms = FixedArray::cast(array->elements());
- // Fetch the prototype.
- JSFunction* array_function =
- Top::context()->global_context()->array_function();
- JSObject* prototype = JSObject::cast(array_function->prototype());
-
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
if (obj->IsFailure()) return obj;
+ FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
- FixedArray* new_elms = FixedArray::cast(obj);
- WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
- // Fill out the new array with old elements.
- for (int i = 0; i < len; i++)
- new_elms->set(to_add + i,
- GetElementToMove(i, elms, prototype),
- mode);
+ CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
+ FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-
- // Move elements to the right
- for (int i = 0; i < len; i++) {
- elms->set(new_length - i - 1,
- GetElementToMove(len - i - 1, elms, prototype),
- mode);
- }
+ MoveElements(&no_gc, elms, to_add, elms, 0, len);
}
// Add the provided values.
@@ -426,32 +502,11 @@ BUILTIN(ArrayUnshift) {
}
-static Object* CallJsBuiltin(const char* name,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- HandleScope handleScope;
-
- Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
- name);
- ASSERT(js_builtin->IsJSFunction());
- Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
- Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
- int n_args = args.length() - 1;
- for (int i = 0; i < n_args; i++) {
- argv[i] = &args[i + 1];
+BUILTIN(ArraySlice) {
+ if (!ArrayPrototypeHasNoElements()) {
+ return CallJsBuiltin("ArraySlice", args);
}
- bool pending_exception = false;
- Handle<Object> result = Execution::Call(function,
- args.receiver(),
- n_args,
- argv.start(),
- &pending_exception);
- if (pending_exception) return Failure::Exception();
- return *result;
-}
-
-BUILTIN(ArraySlice) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@@ -460,21 +515,21 @@ BUILTIN(ArraySlice) {
int n_arguments = args.length() - 1;
// Note carefully choosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relativeStart
- // and to len for relativeEnd.
- int relativeStart = 0;
- int relativeEnd = len;
+ // it's undefined which gets converted to 0 for relative_start
+ // and to len for relative_end.
+ int relative_start = 0;
+ int relative_end = len;
if (n_arguments > 0) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
- relativeStart = Smi::cast(arg1)->value();
+ relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin("ArraySlice", args);
}
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
- relativeEnd = Smi::cast(arg2)->value();
+ relative_end = Smi::cast(arg2)->value();
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin("ArraySlice", args);
}
@@ -482,43 +537,31 @@ BUILTIN(ArraySlice) {
}
// ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 6.
- int k = (relativeStart < 0) ? Max(len + relativeStart, 0)
- : Min(relativeStart, len);
+ int k = (relative_start < 0) ? Max(len + relative_start, 0)
+ : Min(relative_start, len);
// ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 8.
- int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0)
- : Min(relativeEnd, len);
+ int final = (relative_end < 0) ? Max(len + relative_end, 0)
+ : Min(relative_end, len);
// Calculate the length of result array.
int result_len = final - k;
- if (result_len < 0) {
- result_len = 0;
+ if (result_len <= 0) {
+ return AllocateEmptyJSArray();
}
- JSFunction* array_function =
- Top::context()->global_context()->array_function();
- Object* result = Heap::AllocateJSObject(array_function);
+ Object* result = AllocateJSArray();
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
- result = Heap::AllocateFixedArrayWithHoles(result_len);
+ result = Heap::AllocateUninitializedFixedArray(result_len);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
FixedArray* elms = FixedArray::cast(array->elements());
- // Fetch the prototype.
- JSObject* prototype = JSObject::cast(array_function->prototype());
-
AssertNoAllocation no_gc;
- WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
-
- // Fill newly created array.
- for (int i = 0; i < result_len; i++) {
- result_elms->set(i,
- GetElementToMove(k + i, elms, prototype),
- mode);
- }
+ CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
// Set elements.
result_array->set_elements(result_elms);
@@ -530,6 +573,10 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
+ if (!ArrayPrototypeHasNoElements()) {
+ return CallJsBuiltin("ArraySplice", args);
+ }
+
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@@ -546,118 +593,111 @@ BUILTIN(ArraySplice) {
return Heap::undefined_value();
}
- int relativeStart = 0;
+ int relative_start = 0;
Object* arg1 = args[1];
if (arg1->IsSmi()) {
- relativeStart = Smi::cast(arg1)->value();
+ relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin("ArraySplice", args);
}
- int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0)
- : Min(relativeStart, len);
+ int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+ : Min(relative_start, len);
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given differently from when an undefined delete count is given.
// This does not follow ECMA-262, but we do the same for
// compatibility.
- int deleteCount = len;
+ int delete_count = len;
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
- deleteCount = Smi::cast(arg2)->value();
+ delete_count = Smi::cast(arg2)->value();
} else {
return CallJsBuiltin("ArraySplice", args);
}
}
- int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart);
-
- JSFunction* array_function =
- Top::context()->global_context()->array_function();
-
- // Allocate result array.
- Object* result = Heap::AllocateJSObject(array_function);
- if (result->IsFailure()) return result;
- JSArray* result_array = JSArray::cast(result);
-
- result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount);
- if (result->IsFailure()) return result;
- FixedArray* result_elms = FixedArray::cast(result);
+ int actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
FixedArray* elms = FixedArray::cast(array->elements());
- // Fetch the prototype.
- JSObject* prototype = JSObject::cast(array_function->prototype());
+ JSArray* result_array = NULL;
+ if (actual_delete_count == 0) {
+ Object* result = AllocateEmptyJSArray();
+ if (result->IsFailure()) return result;
+ result_array = JSArray::cast(result);
+ } else {
+ // Allocate result array.
+ Object* result = AllocateJSArray();
+ if (result->IsFailure()) return result;
+ result_array = JSArray::cast(result);
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
+ result = Heap::AllocateUninitializedFixedArray(actual_delete_count);
+ if (result->IsFailure()) return result;
+ FixedArray* result_elms = FixedArray::cast(result);
- // Fill newly created array.
- for (int k = 0; k < actualDeleteCount; k++) {
- result_elms->set(k,
- GetElementToMove(actualStart + k, elms, prototype),
- mode);
- }
+ AssertNoAllocation no_gc;
+ // Fill newly created array.
+ CopyElements(&no_gc,
+ result_elms, 0,
+ elms, actual_start,
+ actual_delete_count);
- // Set elements.
- result_array->set_elements(result_elms);
+ // Set elements.
+ result_array->set_elements(result_elms);
- // Set the length.
- result_array->set_length(Smi::FromInt(actualDeleteCount));
+ // Set the length.
+ result_array->set_length(Smi::FromInt(actual_delete_count));
+ }
- int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0;
+ int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actualDeleteCount + itemCount;
+ int new_length = len - actual_delete_count + item_count;
- mode = elms->GetWriteBarrierMode(no_gc);
- if (itemCount < actualDeleteCount) {
+ if (item_count < actual_delete_count) {
// Shrink the array.
- for (int k = actualStart; k < (len - actualDeleteCount); k++) {
- elms->set(k + itemCount,
- GetElementToMove(k + actualDeleteCount, elms, prototype),
- mode);
- }
-
- for (int k = len; k > new_length; k--) {
- elms->set(k - 1, Heap::the_hole_value());
- }
- } else if (itemCount > actualDeleteCount) {
+ AssertNoAllocation no_gc;
+ MoveElements(&no_gc,
+ elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(elms, new_length, len);
+ } else if (item_count > actual_delete_count) {
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
- ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len));
-
- FixedArray* source_elms = elms;
+ ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
// Check if array need to grow.
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
if (obj->IsFailure()) return obj;
-
FixedArray* new_elms = FixedArray::cast(obj);
- mode = new_elms->GetWriteBarrierMode(no_gc);
- // Copy the part before actualStart as is.
- for (int k = 0; k < actualStart; k++) {
- new_elms->set(k, elms->get(k), mode);
- }
+ AssertNoAllocation no_gc;
+ // Copy the part before actual_start as is.
+ CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
+ CopyElements(&no_gc,
+ new_elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(new_elms, new_length, capacity);
- source_elms = elms;
elms = new_elms;
array->set_elements(elms);
- }
-
- for (int k = len - actualDeleteCount; k > actualStart; k--) {
- elms->set(k + itemCount - 1,
- GetElementToMove(k + actualDeleteCount - 1,
- source_elms,
- prototype),
- mode);
+ } else {
+ AssertNoAllocation no_gc;
+ MoveElements(&no_gc,
+ elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
}
}
- for (int k = actualStart; k < actualStart + itemCount; k++) {
- elms->set(k, args[3 + k - actualStart], mode);
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ elms->set(k, args[3 + k - actual_start], mode);
}
// Set the length.
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 4d0fd29923..e42f75894b 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -83,6 +83,11 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
}
+int CodeStub::GetCodeKind() {
+ return Code::STUB;
+}
+
+
Handle<Code> CodeStub::GetCode() {
Code* code;
if (!FindCodeInCache(&code)) {
@@ -97,7 +102,10 @@ Handle<Code> CodeStub::GetCode() {
masm.GetCode(&desc);
// Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()),
+ InLoop(),
+ GetICState());
Handle<Code> new_object =
Factory::NewCode(desc, NULL, flags, masm.CodeObject());
RecordCodeGeneration(*new_object, &masm);
@@ -132,7 +140,10 @@ Object* CodeStub::TryGetCode() {
masm.GetCode(&desc);
// Try to copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()),
+ InLoop(),
+ GetICState());
Object* new_object =
Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
if (new_object->IsFailure()) return new_object;
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 3901a64789..de2ad56cc6 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -28,6 +28,8 @@
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
+#include "globals.h"
+
namespace v8 {
namespace internal {
@@ -48,6 +50,7 @@ namespace internal {
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
+ V(TranscendentalCache) \
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
@@ -138,6 +141,14 @@ class CodeStub BASE_EMBEDDED {
// lazily generated function should be fully optimized or not.
virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
+ // GenericBinaryOpStub needs to override this.
+ virtual int GetCodeKind();
+
+ // GenericBinaryOpStub needs to override this.
+ virtual InlineCacheState GetICState() {
+ return UNINITIALIZED;
+ }
+
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey(), false); }
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 5e25f69099..6841c21612 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -369,6 +369,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
+ {&CodeGenerator::GenerateCharFromCode, "_CharFromCode"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
@@ -380,6 +381,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
{&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
{&CodeGenerator::GenerateNumberToString, "_NumberToString"},
+ {&CodeGenerator::GenerateMathPow, "_Math_pow"},
+ {&CodeGenerator::GenerateMathSin, "_Math_sin"},
+ {&CodeGenerator::GenerateMathCos, "_Math_cos"},
+ {&CodeGenerator::GenerateMathSqrt, "_Math_sqrt"},
};
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 54273673a0..9dcbeb5eeb 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -32,28 +32,23 @@
namespace v8 {
namespace internal {
-
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
-#if defined(ANDROID)
-static const int kScriptGenerations = 1;
-static const int kEvalGlobalGenerations = 1;
-static const int kEvalContextualGenerations = 1;
-static const int kRegExpGenerations = 1;
-#else
// The number of ScriptGenerations is carefully chosen based on histograms.
// See issue 458: http://code.google.com/p/v8/issues/detail?id=458
static const int kScriptGenerations = 5;
static const int kEvalGlobalGenerations = 2;
static const int kEvalContextualGenerations = 2;
static const int kRegExpGenerations = 2;
-#endif
// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
+// Index for the first generation in the cache.
+static const int kFirstGeneration = 0;
+
// The compilation cache consists of several generational sub-caches which uses
// this class as a base class. A sub-cache contains a compilation cache tables
// for each generation of the sub-cache. Since the same source code string has
@@ -70,6 +65,15 @@ class CompilationSubCache {
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
+ // Accessors for first generation.
+ Handle<CompilationCacheTable> GetFirstTable() {
+ return GetTable(kFirstGeneration);
+ }
+ void SetFirstTable(Handle<CompilationCacheTable> value) {
+ ASSERT(kFirstGeneration < generations_);
+ tables_[kFirstGeneration] = *value;
+ }
+
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
void Age();
@@ -104,6 +108,10 @@ class CompilationCacheScript : public CompilationSubCache {
void Put(Handle<String> source, Handle<JSFunction> boilerplate);
private:
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(Handle<String> source,
+ Handle<JSFunction> boilerplate);
+
bool HasOrigin(Handle<JSFunction> boilerplate,
Handle<Object> name,
int line_offset,
@@ -125,6 +133,12 @@ class CompilationCacheEval: public CompilationSubCache {
Handle<Context> context,
Handle<JSFunction> boilerplate);
+ private:
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(Handle<String> source,
+ Handle<Context> context,
+ Handle<JSFunction> boilerplate);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
@@ -140,6 +154,11 @@ class CompilationCacheRegExp: public CompilationSubCache {
void Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
+ private:
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
@@ -287,12 +306,19 @@ Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
}
+Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
+ Handle<String> source,
+ Handle<JSFunction> boilerplate) {
+ CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *boilerplate),
+ CompilationCacheTable);
+}
+
+
void CompilationCacheScript::Put(Handle<String> source,
Handle<JSFunction> boilerplate) {
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
- Handle<CompilationCacheTable> table = GetTable(0);
- CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+ SetFirstTable(TablePut(source, boilerplate));
}
@@ -326,13 +352,21 @@ Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
}
+Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<JSFunction> boilerplate) {
+ CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, *context, *boilerplate),
+ CompilationCacheTable);
+}
+
+
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
Handle<JSFunction> boilerplate) {
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
- Handle<CompilationCacheTable> table = GetTable(0);
- CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+ SetFirstTable(TablePut(source, context, boilerplate));
}
@@ -366,12 +400,20 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
}
+Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
+ Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data) {
+ CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data),
+ CompilationCacheTable);
+}
+
+
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(0);
- CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+ SetFirstTable(TablePut(source, flags, data));
}
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 557a91e4db..ebb62f11c2 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -31,14 +31,14 @@
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
+#include "data-flow.h"
#include "debug.h"
#include "fast-codegen.h"
#include "full-codegen.h"
+#include "liveedit.h"
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
-#include "usage-analyzer.h"
-#include "liveedit.h"
namespace v8 {
namespace internal {
@@ -48,7 +48,7 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
FunctionLiteral* function = info->function();
ASSERT(function != NULL);
// Rewrite the AST by introducing .result assignments where needed.
- if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
+ if (!Rewriter::Process(function)) {
// Signal a stack overflow by returning a null handle. The stack
// overflow exception will be thrown by the caller.
return Handle<Code>::null();
@@ -79,6 +79,17 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
return Handle<Code>::null();
}
+ if (FLAG_use_flow_graph) {
+ FlowGraphBuilder builder;
+ builder.Build(function);
+
+#ifdef DEBUG
+ if (FLAG_print_graph_text) {
+ builder.graph()->PrintText(builder.postorder());
+ }
+#endif
+ }
+
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not:
@@ -117,6 +128,14 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
+ Handle<Context> context = Handle<Context>::null();
+ return MakeCode(context, info);
+}
+#endif
+
+
static Handle<JSFunction> MakeFunction(bool is_global,
bool is_eval,
Compiler::ValidationState validate,
@@ -224,7 +243,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
- Debugger::OnAfterCompile(script, fun);
+ Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
return fun;
@@ -444,6 +463,17 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
return Handle<JSFunction>::null();
}
+ if (FLAG_use_flow_graph) {
+ FlowGraphBuilder builder;
+ builder.Build(literal);
+
+#ifdef DEBUG
+ if (FLAG_print_graph_text) {
+ builder.graph()->PrintText(builder.postorder());
+ }
+#endif
+ }
+
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index f01889d0a1..8e220e6dc0 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -276,6 +276,13 @@ class Compiler : public AllStatic {
};
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info);
+
+#endif
+
+
// During compilation we need a global list of handles to constants
// for frame elements. When the zone gets deleted, we make sure to
// clear this list of handles as well.
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 9baf072119..98ebc479f7 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -95,6 +95,7 @@ enum ContextLookupFlags {
call_as_constructor_delegate) \
V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
+ V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
@@ -216,6 +217,7 @@ class Context: public FixedArray {
CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
EMPTY_SCRIPT_INDEX,
SCRIPT_FUNCTION_INDEX,
+ OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index ba7220a4a6..f7210d5af9 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -59,6 +59,32 @@ static inline int FastD2I(double x) {
}
+// The fast double-to-unsigned-int conversion routine does not guarantee
+// rounding towards zero.
+static inline unsigned int FastD2UI(double x) {
+ // There is no unsigned version of lrint, so there is no fast path
+ // in this function as there is in FastD2I. Using lrint doesn't work
+ // for values of 2^31 and above.
+
+ // Convert "small enough" doubles to uint32_t by fixing the 32
+ // least significant non-fractional bits in the low 32 bits of the
+ // double, and reading them from there.
+ const double k2Pow52 = 4503599627370496.0;
+ bool negative = x < 0;
+ if (negative) {
+ x = -x;
+ }
+ if (x < k2Pow52) {
+ x += k2Pow52;
+ uint32_t result;
+ memcpy(&result, &x, sizeof(result)); // Copy low 32 bits.
+ return negative ? ~result + 1 : result;
+ }
+ // Large number (outside uint32 range), Infinity or NaN.
+ return 0x80000000u; // Return integer indefinite.
+}
+
+
static inline double DoubleToInteger(double x) {
if (isnan(x)) return 0;
if (!isfinite(x) || x == 0) return x;
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 67f7d53f51..bdc7e44a16 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -32,11 +32,12 @@ namespace v8 {
namespace internal {
-// The fast double-to-int conversion routine does not guarantee
+// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
static inline int FastD2I(double x);
+static inline unsigned int FastD2UI(double x);
static inline double FastI2D(int x) {
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index 5e9d217d2a..6b45da02b9 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -33,6 +33,540 @@ namespace v8 {
namespace internal {
+void FlowGraph::AppendInstruction(AstNode* instruction) {
+ ASSERT(instruction != NULL);
+ if (is_empty() || !exit()->IsBlockNode()) {
+ AppendNode(new BlockNode());
+ }
+ BlockNode::cast(exit())->AddInstruction(instruction);
+}
+
+
+void FlowGraph::AppendNode(Node* node) {
+ ASSERT(node != NULL);
+ if (is_empty()) {
+ entry_ = exit_ = node;
+ } else {
+ exit()->AddSuccessor(node);
+ node->AddPredecessor(exit());
+ exit_ = node;
+ }
+}
+
+
+void FlowGraph::AppendGraph(FlowGraph* graph) {
+ ASSERT(!graph->is_empty());
+ if (is_empty()) {
+ entry_ = graph->entry();
+ exit_ = graph->exit();
+ } else {
+ exit()->AddSuccessor(graph->entry());
+ graph->entry()->AddPredecessor(exit());
+ exit_ = graph->exit();
+ }
+}
+
+
+void FlowGraph::Split(BranchNode* branch,
+ FlowGraph* left,
+ FlowGraph* right,
+ JoinNode* merge) {
+ // Graphs are in edge split form. Add empty blocks if necessary.
+ if (left->is_empty()) left->AppendNode(new BlockNode());
+ if (right->is_empty()) right->AppendNode(new BlockNode());
+
+ // Add the branch, left flowgraph and merge.
+ AppendNode(branch);
+ AppendGraph(left);
+ AppendNode(merge);
+
+ // Splice in the right flowgraph.
+ right->AppendNode(merge);
+ branch->AddSuccessor(right->entry());
+ right->entry()->AddPredecessor(branch);
+}
+
+
+void FlowGraph::Loop(JoinNode* merge,
+ FlowGraph* condition,
+ BranchNode* branch,
+ FlowGraph* body) {
+ // Add the merge, condition and branch. Add merge's predecessors in
+ // left-to-right order.
+ AppendNode(merge);
+ body->AppendNode(merge);
+ AppendGraph(condition);
+ AppendNode(branch);
+
+ // Splice in the body flowgraph.
+ branch->AddSuccessor(body->entry());
+ body->entry()->AddPredecessor(branch);
+}
+
+
+void EntryNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor_ != NULL);
+ preorder->Add(this);
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ successor_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void ExitNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ preorder->Add(this);
+ postorder->Add(this);
+}
+
+
+void BlockNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor_ != NULL);
+ preorder->Add(this);
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ successor_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void BranchNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor0_ != NULL && successor1_ != NULL);
+ preorder->Add(this);
+ if (!successor0_->IsMarkedWith(mark)) {
+ successor0_->MarkWith(mark);
+ successor0_->Traverse(mark, preorder, postorder);
+ }
+ if (!successor1_->IsMarkedWith(mark)) {
+ successor1_->MarkWith(mark);
+ successor1_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void JoinNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor_ != NULL);
+ preorder->Add(this);
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ successor_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void FlowGraphBuilder::Build(FunctionLiteral* lit) {
+ graph_ = FlowGraph::Empty();
+ graph_.AppendNode(new EntryNode());
+ global_exit_ = new ExitNode();
+ VisitStatements(lit->body());
+
+ if (HasStackOverflow()) {
+ graph_ = FlowGraph::Empty();
+ return;
+ }
+
+ graph_.AppendNode(global_exit_);
+
+ // Build preorder and postorder traversal orders. All the nodes in
+ // the graph have the same mark flag. For the traversal, use that
+ // flag's negation. Traversal will flip all the flags.
+ bool mark = graph_.entry()->IsMarkedWith(false);
+ graph_.entry()->MarkWith(mark);
+ graph_.entry()->Traverse(mark, &preorder_, &postorder_);
+}
+
+
+void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
+void FlowGraphBuilder::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+ Visit(stmt->condition());
+
+ BranchNode* branch = new BranchNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->then_statement());
+
+ FlowGraph left = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->else_statement());
+
+ JoinNode* join = new JoinNode();
+ original.Split(branch, &left, &graph_, join);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ Visit(stmt->expression());
+ graph_.AppendInstruction(stmt);
+ graph_.AppendNode(global_exit());
+}
+
+
+void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ Visit(stmt->expression());
+ graph_.AppendInstruction(stmt);
+}
+
+
+void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+ graph_.AppendInstruction(stmt);
+}
+
+
+void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ JoinNode* join = new JoinNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->body());
+
+ FlowGraph body = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->cond());
+
+ BranchNode* branch = new BranchNode();
+
+ // Add body, condition and branch.
+ original.AppendNode(join);
+ original.AppendGraph(&body);
+ original.AppendGraph(&graph_); // The condition.
+ original.AppendNode(branch);
+
+ // Tie the knot.
+ branch->AddSuccessor(join);
+ join->AddPredecessor(branch);
+
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+ JoinNode* join = new JoinNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->cond());
+
+ BranchNode* branch = new BranchNode();
+ FlowGraph condition = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->body());
+
+ original.Loop(join, &condition, branch, &graph_);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
+ if (stmt->init() != NULL) Visit(stmt->init());
+
+ JoinNode* join = new JoinNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ if (stmt->cond() != NULL) Visit(stmt->cond());
+
+ BranchNode* branch = new BranchNode();
+ FlowGraph condition = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->body());
+
+ if (stmt->next() != NULL) Visit(stmt->next());
+
+ original.Loop(join, &condition, branch, &graph_);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+ Visit(stmt->enumerable());
+
+ JoinNode* join = new JoinNode();
+ FlowGraph empty;
+ BranchNode* branch = new BranchNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(stmt->body());
+
+ original.Loop(join, &empty, branch, &graph_);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ graph_.AppendInstruction(stmt);
+}
+
+
+void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitConditional(Conditional* expr) {
+ Visit(expr->condition());
+
+ BranchNode* branch = new BranchNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(expr->then_expression());
+
+ FlowGraph left = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(expr->else_expression());
+
+ JoinNode* join = new JoinNode();
+ original.Split(branch, &left, &graph_, join);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitLiteral(Literal* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+ for (int i = 0, len = properties->length(); i < len; i++) {
+ Visit(properties->at(i)->value());
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ ZoneList<Expression*>* values = expr->values();
+ for (int i = 0, len = values->length(); i < len; i++) {
+ Visit(values->at(i));
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ // Left-hand side can be a variable or property (or reference error) but
+ // not both.
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ Visit(expr->value());
+ if (var->IsStackAllocated()) definitions_.Add(expr);
+
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ if (!prop->key()->IsPropertyName()) Visit(prop->key());
+ Visit(expr->value());
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitThrow(Throw* expr) {
+ Visit(expr->exception());
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitProperty(Property* expr) {
+ Visit(expr->obj());
+ if (!expr->key()->IsPropertyName()) Visit(expr->key());
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCall(Call* expr) {
+ Visit(expr->expression());
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ Visit(arguments->at(i));
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
+ Visit(expr->expression());
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ Visit(arguments->at(i));
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ Visit(arguments->at(i));
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+ Visit(expr->expression());
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
+ Visit(expr->expression());
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (var != NULL && var->IsStackAllocated()) {
+ definitions_.Add(expr);
+ }
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+
+ switch (expr->op()) {
+ case Token::COMMA:
+ Visit(expr->right());
+ break;
+
+ case Token::OR: {
+ BranchNode* branch = new BranchNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(expr->right());
+ FlowGraph empty;
+ JoinNode* join = new JoinNode();
+ original.Split(branch, &empty, &graph_, join);
+ graph_ = original;
+ break;
+ }
+
+ case Token::AND: {
+ BranchNode* branch = new BranchNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ Visit(expr->right());
+ FlowGraph empty;
+ JoinNode* join = new JoinNode();
+ original.Split(branch, &graph_, &empty, join);
+ graph_ = original;
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ Visit(expr->right());
+ graph_.AppendInstruction(expr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+ Visit(expr->left());
+ Visit(expr->right());
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
void AstLabeler::Label(CompilationInfo* info) {
info_ = info;
VisitStatements(info_->function()->body());
@@ -204,6 +738,9 @@ void AstLabeler::VisitAssignment(Assignment* expr) {
USE(proxy);
ASSERT(proxy != NULL && proxy->var()->is_this());
info()->set_has_this_properties(true);
+
+ prop->obj()->set_num(AstNode::kNoNumber);
+ prop->key()->set_num(AstNode::kNoNumber);
Visit(expr->value());
expr->set_num(next_number_++);
}
@@ -220,6 +757,9 @@ void AstLabeler::VisitProperty(Property* expr) {
USE(proxy);
ASSERT(proxy != NULL && proxy->var()->is_this());
info()->set_has_this_properties(true);
+
+ expr->obj()->set_num(AstNode::kNoNumber);
+ expr->key()->set_num(AstNode::kNoNumber);
expr->set_num(next_number_++);
}
@@ -558,4 +1098,370 @@ void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
}
+#ifdef DEBUG
+
+// Print a textual representation of an instruction in a flow graph. Using
+// the AstVisitor is overkill because there is no recursion here. It is
+// only used for printing in debug mode.
+class TextInstructionPrinter: public AstVisitor {
+ public:
+ TextInstructionPrinter() {}
+
+ private:
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter);
+};
+
+
+void TextInstructionPrinter::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitBlock(Block* stmt) {
+ PrintF("Block");
+}
+
+
+void TextInstructionPrinter::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ PrintF("ExpressionStatement");
+}
+
+
+void TextInstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
+ PrintF("EmptyStatement");
+}
+
+
+void TextInstructionPrinter::VisitIfStatement(IfStatement* stmt) {
+ PrintF("IfStatement");
+}
+
+
+void TextInstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
+ PrintF("return @%d", stmt->expression()->num());
+}
+
+
+void TextInstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ PrintF("WithEnterStatement");
+}
+
+
+void TextInstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
+ PrintF("WithExitStatement");
+}
+
+
+void TextInstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ PrintF("DoWhileStatement");
+}
+
+
+void TextInstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
+ PrintF("WhileStatement");
+}
+
+
+void TextInstructionPrinter::VisitForStatement(ForStatement* stmt) {
+ PrintF("ForStatement");
+}
+
+
+void TextInstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
+ PrintF("ForInStatement");
+}
+
+
+void TextInstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ PrintF("DebuggerStatement");
+}
+
+
+void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
+ PrintF("FunctionLiteral");
+}
+
+
+void TextInstructionPrinter::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ PrintF("FunctionBoilerplateLiteral");
+}
+
+
+void TextInstructionPrinter::VisitConditional(Conditional* expr) {
+ PrintF("Conditional");
+}
+
+
+void TextInstructionPrinter::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
+ Variable* var = expr->AsVariable();
+ if (var != NULL) {
+ SmartPointer<char> name = var->name()->ToCString();
+ PrintF("%s", *name);
+ } else {
+ ASSERT(expr->AsProperty() != NULL);
+ VisitProperty(expr->AsProperty());
+ }
+}
+
+
+void TextInstructionPrinter::VisitLiteral(Literal* expr) {
+ expr->handle()->ShortPrint();
+}
+
+
+void TextInstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
+ PrintF("RegExpLiteral");
+}
+
+
+void TextInstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
+ PrintF("ObjectLiteral");
+}
+
+
+void TextInstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
+ PrintF("ArrayLiteral");
+}
+
+
+void TextInstructionPrinter::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ PrintF("CatchExtensionObject");
+}
+
+
+void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+
+ if (var != NULL) {
+ SmartPointer<char> name = var->name()->ToCString();
+ PrintF("%s %s @%d",
+ *name,
+ Token::String(expr->op()),
+ expr->value()->num());
+ } else if (prop != NULL) {
+ if (prop->key()->IsPropertyName()) {
+ PrintF("@%d.", prop->obj()->num());
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ prop->key()->AsLiteral()->handle()->Print();
+ PrintF(" %s @%d",
+ Token::String(expr->op()),
+ expr->value()->num());
+ } else {
+ PrintF("@%d[@%d] %s @%d",
+ prop->obj()->num(),
+ prop->key()->num(),
+ Token::String(expr->op()),
+ expr->value()->num());
+ }
+ } else {
+ // Throw reference error.
+ Visit(expr->target());
+ }
+}
+
+
+void TextInstructionPrinter::VisitThrow(Throw* expr) {
+ PrintF("throw @%d", expr->exception()->num());
+}
+
+
+void TextInstructionPrinter::VisitProperty(Property* expr) {
+ if (expr->key()->IsPropertyName()) {
+ PrintF("@%d.", expr->obj()->num());
+ ASSERT(expr->key()->AsLiteral() != NULL);
+ expr->key()->AsLiteral()->handle()->Print();
+ } else {
+ PrintF("@%d[@%d]", expr->obj()->num(), expr->key()->num());
+ }
+}
+
+
+void TextInstructionPrinter::VisitCall(Call* expr) {
+ PrintF("@%d(", expr->expression()->num());
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ if (i != 0) PrintF(", ");
+ PrintF("@%d", arguments->at(i)->num());
+ }
+ PrintF(")");
+}
+
+
+void TextInstructionPrinter::VisitCallNew(CallNew* expr) {
+ PrintF("new @%d(", expr->expression()->num());
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ if (i != 0) PrintF(", ");
+ PrintF("@%d", arguments->at(i)->num());
+ }
+ PrintF(")");
+}
+
+
+void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
+ SmartPointer<char> name = expr->name()->ToCString();
+ PrintF("%s(", *name);
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ if (i != 0) PrintF(", ");
+ PrintF("@%d", arguments->at(i)->num());
+ }
+ PrintF(")");
+}
+
+
+void TextInstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
+ PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
+}
+
+
+void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) {
+ if (expr->is_prefix()) {
+ PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
+ } else {
+ PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
+ }
+}
+
+
+void TextInstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
+ ASSERT(expr->op() != Token::COMMA);
+ ASSERT(expr->op() != Token::OR);
+ ASSERT(expr->op() != Token::AND);
+ PrintF("@%d %s @%d",
+ expr->left()->num(),
+ Token::String(expr->op()),
+ expr->right()->num());
+}
+
+
+void TextInstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
+ PrintF("@%d %s @%d",
+ expr->left()->num(),
+ Token::String(expr->op()),
+ expr->right()->num());
+}
+
+
+void TextInstructionPrinter::VisitThisFunction(ThisFunction* expr) {
+ PrintF("ThisFunction");
+}
+
+
+static int node_count = 0;
+static int instruction_count = 0;
+
+
+void Node::AssignNumbers() {
+ set_number(node_count++);
+}
+
+
+void BlockNode::AssignNumbers() {
+ set_number(node_count++);
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ instructions_[i]->set_num(instruction_count++);
+ }
+}
+
+
+void EntryNode::PrintText() {
+ PrintF("L%d: Entry\n", number());
+ PrintF("goto L%d\n\n", successor_->number());
+}
+
+void ExitNode::PrintText() {
+ PrintF("L%d: Exit\n\n", number());
+}
+
+
+void BlockNode::PrintText() {
+ // Print the instructions in the block.
+ PrintF("L%d: Block\n", number());
+ TextInstructionPrinter printer;
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ PrintF("%d ", instructions_[i]->num());
+ printer.Visit(instructions_[i]);
+ PrintF("\n");
+ }
+ PrintF("goto L%d\n\n", successor_->number());
+}
+
+
+void BranchNode::PrintText() {
+ PrintF("L%d: Branch\n", number());
+ PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number());
+}
+
+
+void JoinNode::PrintText() {
+ PrintF("L%d: Join(", number());
+ for (int i = 0, len = predecessors_.length(); i < len; i++) {
+ if (i != 0) PrintF(", ");
+ PrintF("L%d", predecessors_[i]->number());
+ }
+ PrintF(")\ngoto L%d\n\n", successor_->number());
+}
+
+
+void FlowGraph::PrintText(ZoneList<Node*>* postorder) {
+ PrintF("\n========\n");
+
+ // Number nodes and instructions in reverse postorder.
+ node_count = 0;
+ instruction_count = 0;
+ for (int i = postorder->length() - 1; i >= 0; i--) {
+ postorder->at(i)->AssignNumbers();
+ }
+
+ // Print basic blocks in reverse postorder.
+ for (int i = postorder->length() - 1; i >= 0; i--) {
+ postorder->at(i)->PrintText();
+ }
+}
+
+
+#endif // defined(DEBUG)
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 2331944403..2dc2d73275 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -28,12 +28,403 @@
#ifndef V8_DATAFLOW_H_
#define V8_DATAFLOW_H_
+#include "v8.h"
+
#include "ast.h"
#include "compiler.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
+class BitVector: public ZoneObject {
+ public:
+ explicit BitVector(int length)
+ : length_(length),
+ data_length_(SizeFor(length)),
+ data_(Zone::NewArray<uint32_t>(data_length_)) {
+ ASSERT(length > 0);
+ Clear();
+ }
+
+ BitVector(const BitVector& other)
+ : length_(other.length()),
+ data_length_(SizeFor(length_)),
+ data_(Zone::NewArray<uint32_t>(data_length_)) {
+ CopyFrom(other);
+ }
+
+ static int SizeFor(int length) {
+ return 1 + ((length - 1) / 32);
+ }
+
+ BitVector& operator=(const BitVector& rhs) {
+ if (this != &rhs) CopyFrom(rhs);
+ return *this;
+ }
+
+ void CopyFrom(const BitVector& other) {
+ ASSERT(other.length() == length());
+ for (int i = 0; i < data_length_; i++) {
+ data_[i] = other.data_[i];
+ }
+ }
+
+ bool Contains(int i) {
+ ASSERT(i >= 0 && i < length());
+ uint32_t block = data_[i / 32];
+ return (block & (1U << (i % 32))) != 0;
+ }
+
+ void Add(int i) {
+ ASSERT(i >= 0 && i < length());
+ data_[i / 32] |= (1U << (i % 32));
+ }
+
+ void Remove(int i) {
+ ASSERT(i >= 0 && i < length());
+ data_[i / 32] &= ~(1U << (i % 32));
+ }
+
+ void Union(const BitVector& other) {
+ ASSERT(other.length() == length());
+ for (int i = 0; i < data_length_; i++) {
+ data_[i] |= other.data_[i];
+ }
+ }
+
+ void Intersect(const BitVector& other) {
+ ASSERT(other.length() == length());
+ for (int i = 0; i < data_length_; i++) {
+ data_[i] &= other.data_[i];
+ }
+ }
+
+ void Clear() {
+ for (int i = 0; i < data_length_; i++) {
+ data_[i] = 0;
+ }
+ }
+
+ bool IsEmpty() const {
+ for (int i = 0; i < data_length_; i++) {
+ if (data_[i] != 0) return false;
+ }
+ return true;
+ }
+
+ int length() const { return length_; }
+
+ private:
+ int length_;
+ int data_length_;
+ uint32_t* data_;
+};
+
+
+// Forward declarations of Node types.
+class Node;
+class BranchNode;
+class JoinNode;
+
+// Flow graphs have a single entry and single exit. The empty flowgraph is
+// represented by both entry and exit being NULL.
+class FlowGraph BASE_EMBEDDED {
+ public:
+ FlowGraph() : entry_(NULL), exit_(NULL) {}
+
+ static FlowGraph Empty() { return FlowGraph(); }
+
+ bool is_empty() const { return entry_ == NULL; }
+ Node* entry() const { return entry_; }
+ Node* exit() const { return exit_; }
+
+ // Add a single instruction to the end of this flowgraph.
+ void AppendInstruction(AstNode* instruction);
+
+ // Add a single node to the end of this flow graph.
+ void AppendNode(Node* node);
+
+ // Add a flow graph fragment to the end of this one.
+ void AppendGraph(FlowGraph* graph);
+
+ // Concatenate an if-then-else flow-graph to this one. Control is split
+ // and merged, so the graph remains single-entry, single-exit.
+ void Split(BranchNode* branch,
+ FlowGraph* left,
+ FlowGraph* right,
+ JoinNode* merge);
+
+ // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
+ // one. Control is split by the condition and merged back from the back
+ // edge at end of the body to the beginning of the condition. The single
+ // (free) exit of the result graph is the right (false) arm of the branch
+ // node.
+ void Loop(JoinNode* merge,
+ FlowGraph* condition,
+ BranchNode* branch,
+ FlowGraph* body);
+
+#ifdef DEBUG
+ void PrintText(ZoneList<Node*>* postorder);
+#endif
+
+ private:
+ Node* entry_;
+ Node* exit_;
+};
+
+
+// Flow-graph nodes.
+class Node: public ZoneObject {
+ public:
+ Node() : number_(-1), mark_(false) {}
+
+ virtual ~Node() {}
+
+ virtual bool IsBlockNode() { return false; }
+ virtual bool IsJoinNode() { return false; }
+
+ virtual void AddPredecessor(Node* predecessor) = 0;
+ virtual void AddSuccessor(Node* successor) = 0;
+
+ bool IsMarkedWith(bool mark) { return mark_ == mark; }
+ void MarkWith(bool mark) { mark_ = mark; }
+
+ // Perform a depth first search and record preorder and postorder
+ // traversal orders.
+ virtual void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) = 0;
+
+ int number() { return number_; }
+ void set_number(int number) { number_ = number; }
+
+#ifdef DEBUG
+ virtual void AssignNumbers();
+ virtual void PrintText() = 0;
+#endif
+
+ private:
+ int number_;
+ bool mark_;
+
+ DISALLOW_COPY_AND_ASSIGN(Node);
+};
+
+
+// An entry node has no predecessors and a single successor.
+class EntryNode: public Node {
+ public:
+ EntryNode() : successor_(NULL) {}
+
+ void AddPredecessor(Node* predecessor) { UNREACHABLE(); }
+
+ void AddSuccessor(Node* successor) {
+ ASSERT(successor_ == NULL && successor != NULL);
+ successor_ = successor;
+ }
+
+ void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+#ifdef DEBUG
+ void PrintText();
+#endif
+
+ private:
+ Node* successor_;
+
+ DISALLOW_COPY_AND_ASSIGN(EntryNode);
+};
+
+
+// An exit node has arbitrarily many predecessors and no successors.
+class ExitNode: public Node {
+ public:
+ ExitNode() : predecessors_(4) {}
+
+ void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor != NULL);
+ predecessors_.Add(predecessor);
+ }
+
+ void AddSuccessor(Node* successor) { /* Do nothing. */ }
+
+ void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+#ifdef DEBUG
+ void PrintText();
+#endif
+
+ private:
+ ZoneList<Node*> predecessors_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExitNode);
+};
+
+
+// Block nodes have a single successor and predecessor and a list of
+// instructions.
+class BlockNode: public Node {
+ public:
+ BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
+
+ static BlockNode* cast(Node* node) {
+ ASSERT(node->IsBlockNode());
+ return reinterpret_cast<BlockNode*>(node);
+ }
+
+ bool IsBlockNode() { return true; }
+
+ void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor_ == NULL && predecessor != NULL);
+ predecessor_ = predecessor;
+ }
+
+ void AddSuccessor(Node* successor) {
+ ASSERT(successor_ == NULL && successor != NULL);
+ successor_ = successor;
+ }
+
+ void AddInstruction(AstNode* instruction) {
+ instructions_.Add(instruction);
+ }
+
+ void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+#ifdef DEBUG
+ void AssignNumbers();
+ void PrintText();
+#endif
+
+ private:
+ Node* predecessor_;
+ Node* successor_;
+ ZoneList<AstNode*> instructions_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockNode);
+};
+
+
+// Branch nodes have a single predecessor and a pair of successors.
+class BranchNode: public Node {
+ public:
+ BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
+
+ void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor_ == NULL && predecessor != NULL);
+ predecessor_ = predecessor;
+ }
+
+ void AddSuccessor(Node* successor) {
+ ASSERT(successor1_ == NULL && successor != NULL);
+ if (successor0_ == NULL) {
+ successor0_ = successor;
+ } else {
+ successor1_ = successor;
+ }
+ }
+
+ void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+#ifdef DEBUG
+ void PrintText();
+#endif
+
+ private:
+ Node* predecessor_;
+ Node* successor0_;
+ Node* successor1_;
+
+ DISALLOW_COPY_AND_ASSIGN(BranchNode);
+};
+
+
+// Join nodes have arbitrarily many predecessors and a single successor.
+class JoinNode: public Node {
+ public:
+ JoinNode() : predecessors_(2), successor_(NULL) {}
+
+ static JoinNode* cast(Node* node) {
+ ASSERT(node->IsJoinNode());
+ return reinterpret_cast<JoinNode*>(node);
+ }
+
+ bool IsJoinNode() { return true; }
+
+ void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor != NULL);
+ predecessors_.Add(predecessor);
+ }
+
+ void AddSuccessor(Node* successor) {
+ ASSERT(successor_ == NULL && successor != NULL);
+ successor_ = successor;
+ }
+
+ void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+#ifdef DEBUG
+ void PrintText();
+#endif
+
+ private:
+ ZoneList<Node*> predecessors_;
+ Node* successor_;
+
+ DISALLOW_COPY_AND_ASSIGN(JoinNode);
+};
+
+
+// Construct a flow graph from a function literal. Build pre- and postorder
+// traversal orders as a byproduct.
+class FlowGraphBuilder: public AstVisitor {
+ public:
+ FlowGraphBuilder()
+ : global_exit_(NULL),
+ preorder_(4),
+ postorder_(4),
+ definitions_(4) {
+ }
+
+ void Build(FunctionLiteral* lit);
+
+ FlowGraph* graph() { return &graph_; }
+
+ ZoneList<Node*>* postorder() { return &postorder_; }
+
+ private:
+ ExitNode* global_exit() { return global_exit_; }
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ FlowGraph graph_;
+ ExitNode* global_exit_;
+ ZoneList<Node*> preorder_;
+ ZoneList<Node*> postorder_;
+
+ // The flow graph builder collects a list of definitions (assignments and
+ // count operations) to stack-allocated variables to use for reaching
+ // definitions analysis.
+ ZoneList<AstNode*> definitions_;
+
+ DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
+};
+
+
// This class is used to number all expressions in the AST according to
// their evaluation order (post-order left-to-right traversal).
class AstLabeler: public AstVisitor {
diff --git a/deps/v8/src/date-delay.js b/deps/v8/src/date-delay.js
index 7d8f45888f..c0180c28c2 100644
--- a/deps/v8/src/date-delay.js
+++ b/deps/v8/src/date-delay.js
@@ -113,8 +113,11 @@ function EquivalentTime(t) {
// we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
- var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
- return TimeClip(MakeDate(day, TimeWithinDay(t)));
+
+ var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)),
+ MONTH_FROM_TIME(t),
+ DATE_FROM_TIME(t));
+ return MakeDate(day, TimeWithinDay(t));
}
@@ -257,14 +260,6 @@ function TimeInYear(year) {
}
-// Compute modified Julian day from year, month, date.
-function ToJulianDay(year, month, date) {
- var jy = (month > 1) ? year : year - 1;
- var jm = (month > 1) ? month + 2 : month + 14;
- var ja = FLOOR(jy / 100);
- return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja);
-}
-
var four_year_cycle_table = CalculateDateTable();
@@ -359,20 +354,18 @@ function FromJulianDay(julian) {
function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
- // Conversion to integers.
year = TO_INTEGER(year);
month = TO_INTEGER(month);
date = TO_INTEGER(date);
- // Overflow months into year.
- year = year + FLOOR(month/12);
- month = month % 12;
- if (month < 0) {
- month += 12;
+ if (year < kMinYear || year > kMaxYear ||
+ month < kMinMonth || month > kMaxMonth ||
+ date < kMinDate || date > kMaxDate) {
+ return $NaN;
}
- // Return days relative to Jan 1 1970.
- return ToJulianDay(year, month, date) - kDayZeroInJulianDay;
+ // Now we rely on year, month and date being SMIs.
+ return %DateMakeDay(year, month, date);
}
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index 55c25a926c..5ba5a3bb73 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -1251,7 +1251,9 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
} else if (request.command == 'version') {
this.versionRequest_(request, response);
} else if (request.command == 'profile') {
- this.profileRequest_(request, response);
+ this.profileRequest_(request, response);
+ } else if (request.command == 'changelive') {
+ this.changeLiveRequest_(request, response);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@@ -1954,6 +1956,52 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
};
+DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
+ if (!Debug.LiveEditChangeScript) {
+ return response.failed('LiveEdit feature is not supported');
+ }
+ if (!request.arguments) {
+ return response.failed('Missing arguments');
+ }
+ var script_id = request.arguments.script_id;
+ var change_pos = parseInt(request.arguments.change_pos);
+ var change_len = parseInt(request.arguments.change_len);
+ var new_string = request.arguments.new_string;
+ if (!IS_STRING(new_string)) {
+ response.failed('Argument "new_string" is not a string value');
+ return;
+ }
+
+ var scripts = %DebugGetLoadedScripts();
+
+ var the_script = null;
+ for (var i = 0; i < scripts.length; i++) {
+ if (scripts[i].id == script_id) {
+ the_script = scripts[i];
+ }
+ }
+ if (!the_script) {
+ response.failed('Script not found');
+ return;
+ }
+
+ var change_log = new Array();
+ try {
+ Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string,
+ change_log);
+ } catch (e) {
+ if (e instanceof Debug.LiveEditChangeScript.Failure) {
+ // Let's treat it as a "success" so that body with change_log will be
+ // sent back. "change_log" will have "failure" field set.
+ change_log.push( { failure: true } );
+ } else {
+ throw e;
+ }
+ }
+ response.body = {change_log: change_log};
+};
+
+
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 8c4f51d95a..959bea14de 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -39,6 +39,7 @@
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
+#include "messages.h"
#include "natives.h"
#include "stub-cache.h"
#include "log.h"
@@ -123,7 +124,9 @@ void BreakLocationIterator::Next() {
if (RelocInfo::IsCodeTarget(rmode())) {
Address target = original_rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) {
+ if ((code->is_inline_cache_stub() &&
+ code->kind() != Code::BINARY_OP_IC) ||
+ RelocInfo::IsConstructCall(rmode())) {
break_point_++;
return;
}
@@ -755,6 +758,12 @@ bool Debug::Load() {
bool caught_exception =
!CompileDebuggerScript(Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(Natives::GetIndex("debug"));
+
+ if (FLAG_enable_liveedit) {
+ caught_exception = caught_exception ||
+ !CompileDebuggerScript(Natives::GetIndex("liveedit"));
+ }
+
Debugger::set_compiling_natives(false);
// Make sure we mark the debugger as not loading before we might
@@ -1337,24 +1346,26 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Find the builtin debug break function matching the calling convention
// used by the call site.
if (code->is_inline_cache_stub()) {
- if (code->is_call_stub()) {
- return ComputeCallDebugBreak(code->arguments_count());
- }
- if (code->is_load_stub()) {
- return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
- }
- if (code->is_store_stub()) {
- return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
- }
- if (code->is_keyed_load_stub()) {
- Handle<Code> result =
- Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
- return result;
- }
- if (code->is_keyed_store_stub()) {
- Handle<Code> result =
- Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
- return result;
+ switch (code->kind()) {
+ case Code::CALL_IC:
+ return ComputeCallDebugBreak(code->arguments_count());
+
+ case Code::LOAD_IC:
+ return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+
+ case Code::STORE_IC:
+ return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+
+ case Code::KEYED_LOAD_IC:
+ return Handle<Code>(
+ Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+
+ case Code::KEYED_STORE_IC:
+ return Handle<Code>(
+ Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+
+ default:
+ UNREACHABLE();
}
}
if (RelocInfo::IsConstructCall(mode)) {
@@ -1959,7 +1970,8 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
// Handle debugger actions when a new script is compiled.
-void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
+void Debugger::OnAfterCompile(Handle<Script> script,
+ AfterCompileFlags after_compile_flags) {
HandleScope scope;
// Add the newly compiled script to the script cache.
@@ -2006,7 +2018,7 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
return;
}
// Bail out based on state or if there is no listener for this event
- if (in_debugger) return;
+ if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
if (!Debugger::EventActive(v8::AfterCompile)) return;
// Create the compile state object.
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index cab9e8e447..546512b113 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -604,8 +604,13 @@ class Debugger {
static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
static void OnException(Handle<Object> exception, bool uncaught);
static void OnBeforeCompile(Handle<Script> script);
+
+ enum AfterCompileFlags {
+ NO_AFTER_COMPILE_FLAGS,
+ SEND_WHEN_DEBUGGING
+ };
static void OnAfterCompile(Handle<Script> script,
- Handle<JSFunction> fun);
+ AfterCompileFlags after_compile_flags);
static void OnNewFunction(Handle<JSFunction> fun);
static void OnScriptCollected(int id);
static void ProcessDebugEvent(v8::DebugEvent event,
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 2a347cd6fd..36911da245 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -30,11 +30,12 @@
#include "globals.h"
#include "heap.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ZoneScopeInfo;
// Interface for handle based allocation.
diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h
index e96daf65b5..a0282bbc42 100644
--- a/deps/v8/src/fast-codegen.h
+++ b/deps/v8/src/fast-codegen.h
@@ -93,6 +93,7 @@ class FastCodeGenerator: public AstVisitor {
Register accumulator1();
Register scratch0();
Register scratch1();
+ Register scratch2();
Register receiver_reg();
Register context_reg();
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 6e22d5bbc2..573d893903 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -153,6 +153,9 @@ DEFINE_bool(always_fast_compiler, false,
"try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
+DEFINE_bool(safe_int32_compiler, false,
+ "enable optimized side-effect-free int32 expressions.")
+DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -163,6 +166,7 @@ DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
+DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
@@ -230,9 +234,6 @@ DEFINE_bool(trace_exception, false,
DEFINE_bool(preallocate_message_memory, false,
"preallocate some memory to build stack traces.")
-// usage-analyzer.cc
-DEFINE_bool(usage_computation, true, "compute variable usage counts")
-
// v8.cc
DEFINE_bool(preemption, false,
"activate a 100ms timer that switches between V8 threads")
@@ -304,6 +305,8 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
+DEFINE_bool(print_graph_text, false,
+ "print a text representation of the flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
diff --git a/deps/v8/src/frame-element.cc b/deps/v8/src/frame-element.cc
index 14555596ad..ee7be95f1a 100644
--- a/deps/v8/src/frame-element.cc
+++ b/deps/v8/src/frame-element.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "frame-element.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h
index 5762814ff2..3c5a38952a 100644
--- a/deps/v8/src/frame-element.h
+++ b/deps/v8/src/frame-element.h
@@ -30,6 +30,7 @@
#include "number-info.h"
#include "macro-assembler.h"
+#include "zone.h"
namespace v8 {
namespace internal {
@@ -53,23 +54,25 @@ class FrameElement BASE_EMBEDDED {
SYNCED
};
- inline NumberInfo::Type number_info() {
+ inline NumberInfo number_info() {
// Copied elements do not have number info. Instead
// we have to inspect their backing element in the frame.
ASSERT(!is_copy());
- if (!is_constant()) return NumberInfoField::decode(value_);
+ if (!is_constant()) {
+ return NumberInfo::FromInt(NumberInfoField::decode(value_));
+ }
Handle<Object> value = handle();
- if (value->IsSmi()) return NumberInfo::kSmi;
- if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
- return NumberInfo::kUnknown;
+ if (value->IsSmi()) return NumberInfo::Smi();
+ if (value->IsHeapNumber()) return NumberInfo::HeapNumber();
+ return NumberInfo::Unknown();
}
- inline void set_number_info(NumberInfo::Type info) {
+ inline void set_number_info(NumberInfo info) {
// Copied elements do not have number info. Instead
// we have to inspect their backing element in the frame.
ASSERT(!is_copy());
value_ = value_ & ~NumberInfoField::mask();
- value_ = value_ | NumberInfoField::encode(info);
+ value_ = value_ | NumberInfoField::encode(info.ToInt());
}
// The default constructor creates an invalid frame element.
@@ -77,7 +80,7 @@ class FrameElement BASE_EMBEDDED {
value_ = TypeField::encode(INVALID)
| CopiedField::encode(false)
| SyncedField::encode(false)
- | NumberInfoField::encode(NumberInfo::kUninitialized)
+ | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
| DataField::encode(0);
}
@@ -88,7 +91,7 @@ class FrameElement BASE_EMBEDDED {
}
// Factory function to construct an in-memory frame element.
- static FrameElement MemoryElement(NumberInfo::Type info) {
+ static FrameElement MemoryElement(NumberInfo info) {
FrameElement result(MEMORY, no_reg, SYNCED, info);
return result;
}
@@ -96,7 +99,7 @@ class FrameElement BASE_EMBEDDED {
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg,
SyncFlag is_synced,
- NumberInfo::Type info) {
+ NumberInfo info) {
return FrameElement(REGISTER, reg, is_synced, info);
}
@@ -210,11 +213,11 @@ class FrameElement BASE_EMBEDDED {
FrameElement(Type type,
Register reg,
SyncFlag is_synced,
- NumberInfo::Type info) {
+ NumberInfo info) {
value_ = TypeField::encode(type)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
- | NumberInfoField::encode(info)
+ | NumberInfoField::encode(info.ToInt())
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
}
@@ -223,7 +226,7 @@ class FrameElement BASE_EMBEDDED {
value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
- | NumberInfoField::encode(NumberInfo::kUninitialized)
+ | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@@ -252,8 +255,8 @@ class FrameElement BASE_EMBEDDED {
class TypeField: public BitField<Type, 0, 3> {};
class CopiedField: public BitField<bool, 3, 1> {};
class SyncedField: public BitField<bool, 4, 1> {};
- class NumberInfoField: public BitField<NumberInfo::Type, 5, 3> {};
- class DataField: public BitField<uint32_t, 8, 32 - 8> {};
+ class NumberInfoField: public BitField<int, 5, 4> {};
+ class DataField: public BitField<uint32_t, 9, 32 - 9> {};
friend class VirtualFrame;
};
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 5d88265c79..3bf4c93e39 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -32,7 +32,6 @@
#include "scopeinfo.h"
#include "string-stream.h"
#include "top.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 8f6f47c656..3840feef43 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -261,6 +261,8 @@ template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class Script;
class Slot;
class Smi;
+template <typename Config, class Allocator = FreeStoreAllocationPolicy>
+ class SplayTree;
class Statement;
class String;
class Struct;
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 971c9164a2..c71d92bb15 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -203,7 +203,7 @@ void TransformToFastProperties(Handle<JSObject> object,
void FlattenString(Handle<String> string) {
- CALL_HEAP_FUNCTION_VOID(string->TryFlattenIfNotFlat());
+ CALL_HEAP_FUNCTION_VOID(string->TryFlatten());
ASSERT(string->IsFlat());
}
@@ -283,6 +283,12 @@ Handle<Object> GetProperty(Handle<Object> obj,
}
+Handle<Object> GetElement(Handle<Object> obj,
+ uint32_t index) {
+ CALL_HEAP_FUNCTION(Runtime::GetElement(obj, index), Object);
+}
+
+
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@@ -362,8 +368,11 @@ Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
}
-Handle<String> SubString(Handle<String> str, int start, int end) {
- CALL_HEAP_FUNCTION(str->SubString(start, end), String);
+Handle<String> SubString(Handle<String> str,
+ int start,
+ int end,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(str->SubString(start, end, pretenure), String);
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 90e51fa56c..0c137a4ce7 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -233,6 +233,9 @@ Handle<Object> GetProperty(Handle<JSObject> obj,
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key);
+Handle<Object> GetElement(Handle<Object> obj,
+ uint32_t index);
+
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@@ -287,7 +290,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second);
-Handle<String> SubString(Handle<String> str, int start, int end);
+Handle<String> SubString(Handle<String> str,
+ int start,
+ int end,
+ PretenureFlag pretenure = NOT_TENURED);
// Sets the expected number of properties for the function's instances.
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index f18bf0f61b..1acdb2a081 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -187,6 +187,18 @@ void Heap::RecordWrite(Address address, int offset) {
}
+void Heap::RecordWrites(Address address, int start, int len) {
+ if (new_space_.Contains(address)) return;
+ ASSERT(!new_space_.FromSpaceContains(address));
+ for (int offset = start;
+ offset < start + len * kPointerSize;
+ offset += kPointerSize) {
+ SLOW_ASSERT(Contains(address + offset));
+ Page::SetRSet(address, offset);
+ }
+}
+
+
OldSpace* Heap::TargetSpace(HeapObject* object) {
InstanceType type = object->map()->instance_type();
AllocationSpace space = TargetSpaceId(type);
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 3cb65eeecb..90544f1174 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -31,6 +31,7 @@
#include "frames-inl.h"
#include "global-handles.h"
#include "string-stream.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index c615942bf7..d6f26505cd 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -28,6 +28,8 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
+#include "zone.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 9ff57d251e..fbe04640db 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -46,6 +46,7 @@
#include "arm/regexp-macro-assembler-arm.h"
#endif
+
namespace v8 {
namespace internal {
@@ -371,11 +372,6 @@ void Heap::CollectAllGarbage(bool force_compaction) {
}
-void Heap::NotifyContextDisposed() {
- contexts_disposed_++;
-}
-
-
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@@ -545,12 +541,22 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
+ GCTracer::ExternalScope scope(tracer);
global_gc_prologue_callback_();
}
EnsureFromSpaceIsCommitted();
+
+ // Perform mark-sweep with optional compaction.
if (collector == MARK_COMPACTOR) {
MarkCompact(tracer);
+ }
+
+ // Always perform a scavenge to make room in new space.
+ Scavenge();
+ // Update the old space promotion limits after the scavenge due to
+ // promotions during scavenge.
+ if (collector == MARK_COMPACTOR) {
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
@@ -558,12 +564,12 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
}
- Scavenge();
Counters::objs_since_last_young.Set(0);
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
+ GCTracer::ExternalScope scope(tracer);
GlobalHandles::PostGarbageCollectionProcessing();
}
@@ -578,6 +584,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
ASSERT(!allocation_allowed_);
+ GCTracer::ExternalScope scope(tracer);
global_gc_epilogue_callback_();
}
VerifySymbolTable();
@@ -1209,6 +1216,16 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
}
+Object* Heap::AllocateCodeCache() {
+ Object* result = AllocateStruct(CODE_CACHE_TYPE);
+ if (result->IsFailure()) return result;
+ CodeCache* code_cache = CodeCache::cast(result);
+ code_cache->set_default_cache(empty_fixed_array());
+ code_cache->set_normal_type_cache(undefined_value());
+ return code_cache;
+}
+
+
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@@ -1625,7 +1642,7 @@ bool Heap::CreateInitialObjects() {
if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character strings.
- obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
+ obj = AllocateFixedArray(String::kMaxAsciiCharCode+1, TENURED);
if (obj->IsFailure()) return false;
set_single_character_string_cache(FixedArray::cast(obj));
@@ -1659,7 +1676,7 @@ Object* Heap::InitializeNumberStringCache() {
// max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
int number_string_cache_size = max_semispace_size_ / 512;
number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
- Object* obj = AllocateFixedArray(number_string_cache_size * 2);
+ Object* obj = AllocateFixedArray(number_string_cache_size * 2, TENURED);
if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
return obj;
}
@@ -1982,7 +1999,8 @@ Object* Heap::AllocateConsString(String* first, String* second) {
Object* Heap::AllocateSubString(String* buffer,
int start,
- int end) {
+ int end,
+ PretenureFlag pretenure) {
int length = end - start;
if (length == 1) {
@@ -1998,16 +2016,13 @@ Object* Heap::AllocateSubString(String* buffer,
}
// Make an attempt to flatten the buffer to reduce access time.
- if (!buffer->IsFlat()) {
- buffer->TryFlatten();
- }
+ buffer->TryFlatten();
Object* result = buffer->IsAsciiRepresentation()
- ? AllocateRawAsciiString(length)
- : AllocateRawTwoByteString(length);
+ ? AllocateRawAsciiString(length, pretenure)
+ : AllocateRawTwoByteString(length, pretenure);
if (result->IsFailure()) return result;
String* string_result = String::cast(result);
-
// Copy the characters into the new object.
if (buffer->IsAsciiRepresentation()) {
ASSERT(string_result->IsAsciiRepresentation());
@@ -2957,6 +2972,18 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
}
+Object* Heap::AllocateUninitializedFixedArray(int length) {
+ if (length == 0) return empty_fixed_array();
+
+ Object* obj = AllocateRawFixedArray(length);
+ if (obj->IsFailure()) return obj;
+
+ reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
+ FixedArray::cast(obj)->set_length(length);
+ return obj;
+}
+
+
Object* Heap::AllocateFixedArrayWithHoles(int length) {
if (length == 0) return empty_fixed_array();
Object* result = AllocateRawFixedArray(length);
@@ -2966,18 +2993,17 @@ Object* Heap::AllocateFixedArrayWithHoles(int length) {
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
// Initialize body.
- Object* value = the_hole_value();
- for (int index = 0; index < length; index++) {
- ASSERT(!Heap::InNewSpace(value)); // value = the hole
- array->set(index, value, SKIP_WRITE_BARRIER);
- }
+ ASSERT(!Heap::InNewSpace(the_hole_value()));
+ MemsetPointer(HeapObject::RawField(array, FixedArray::kHeaderSize),
+ the_hole_value(),
+ length);
}
return result;
}
-Object* Heap::AllocateHashTable(int length) {
- Object* result = Heap::AllocateFixedArray(length);
+Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
+ Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
@@ -3060,13 +3086,7 @@ bool Heap::IdleNotification() {
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
- if (!FLAG_expose_gc && (contexts_disposed_ > 0)) {
- HistogramTimerScope scope(&Counters::gc_context);
- CollectAllGarbage(false);
- ASSERT(contexts_disposed_ == 0);
- return false;
- }
-
+ bool uncommit = true;
bool finished = false;
if (last_gc_count == gc_count_) {
@@ -3077,7 +3097,12 @@ bool Heap::IdleNotification() {
}
if (number_idle_notifications == kIdlesBeforeScavenge) {
- CollectGarbage(0, NEW_SPACE);
+ if (contexts_disposed_ > 0) {
+ HistogramTimerScope scope(&Counters::gc_context);
+ CollectAllGarbage(false);
+ } else {
+ CollectGarbage(0, NEW_SPACE);
+ }
new_space_.Shrink();
last_gc_count = gc_count_;
@@ -3097,10 +3122,29 @@ bool Heap::IdleNotification() {
last_gc_count = gc_count_;
number_idle_notifications = 0;
finished = true;
+
+ } else if (contexts_disposed_ > 0) {
+ if (FLAG_expose_gc) {
+ contexts_disposed_ = 0;
+ } else {
+ HistogramTimerScope scope(&Counters::gc_context);
+ CollectAllGarbage(false);
+ last_gc_count = gc_count_;
+ }
+ // If this is the first idle notification, we reset the
+ // notification count to avoid letting idle notifications for
+ // context disposal garbage collections start a potentially too
+ // aggressive idle GC cycle.
+ if (number_idle_notifications <= 1) {
+ number_idle_notifications = 0;
+ uncommit = false;
+ }
}
- // Uncommit unused memory in new space.
- Heap::UncommitFromSpace();
+ // Make sure that we have no pending context disposals and
+ // conditionally uncommit from space.
+ ASSERT(contexts_disposed_ == 0);
+ if (uncommit) Heap::UncommitFromSpace();
return finished;
}
@@ -4062,6 +4106,7 @@ void Heap::TracePathToGlobal() {
GCTracer::GCTracer()
: start_time_(0.0),
start_size_(0.0),
+ external_time_(0.0),
gc_count_(0),
full_gc_count_(0),
is_compacting_(false),
@@ -4079,10 +4124,12 @@ GCTracer::GCTracer()
GCTracer::~GCTracer() {
if (!FLAG_trace_gc) return;
// Printf ONE line iff flag is set.
- PrintF("%s %.1f -> %.1f MB, %d ms.\n",
- CollectorString(),
- start_size_, SizeOfHeapObjects(),
- static_cast<int>(OS::TimeCurrentMillis() - start_time_));
+ int time = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
+ int external_time = static_cast<int>(external_time_);
+ PrintF("%s %.1f -> %.1f MB, ",
+ CollectorString(), start_size_, SizeOfHeapObjects());
+ if (external_time > 0) PrintF("%d / ", external_time);
+ PrintF("%d ms.\n", time);
#if defined(ENABLE_LOGGING_AND_PROFILING)
Heap::PrintShortHeapStatistics();
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 50846499d4..eee5a0577f 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -30,12 +30,15 @@
#include <math.h>
-#include "zone-inl.h"
-
+#include "splay-tree-inl.h"
+#include "v8-counters.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ZoneScopeInfo;
+
// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
/* Put the byte array map early. We need it to be in place by the time */ \
@@ -345,6 +348,9 @@ class Heap : public AllStatic {
// Allocate a map for the specified function
static Object* AllocateInitialMap(JSFunction* fun);
+ // Allocates an empty code cache.
+ static Object* AllocateCodeCache();
+
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// allocation functions based on the encoding of the string buffer used to
@@ -449,9 +455,16 @@ class Heap : public AllStatic {
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
- // Allocate uninitialized, non-tenured fixed array with length elements.
+ // Allocates a fixed array initialized with undefined values
static Object* AllocateFixedArray(int length);
+ // Allocates an uninitialized fixed array. It must be filled by the caller.
+ //
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateUninitializedFixedArray(int length);
+
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
static Object* CopyFixedArray(FixedArray* src);
@@ -464,7 +477,8 @@ class Heap : public AllStatic {
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
- static Object* AllocateHashTable(int length);
+ static Object* AllocateHashTable(int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
static Object* AllocateGlobalContext();
@@ -556,7 +570,8 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateSubString(String* buffer,
int start,
- int end);
+ int end,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocate a new external string object, which is backed by a string
// resource that resides outside the V8 heap.
@@ -633,7 +648,7 @@ class Heap : public AllStatic {
static void CollectAllGarbage(bool force_compaction);
// Notify the heap that a context has been disposed.
- static void NotifyContextDisposed();
+ static int NotifyContextDisposed() { return ++contexts_disposed_; }
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@@ -765,6 +780,9 @@ class Heap : public AllStatic {
// Write barrier support for address[offset] = o.
static inline void RecordWrite(Address address, int offset);
+ // Write barrier support for address[start : start + len[ = o.
+ static inline void RecordWrites(Address address, int start, int len);
+
// Given an address occupied by a live code object, return that object.
static Object* FindCodeObject(Address a);
@@ -1518,8 +1536,23 @@ class DisableAssertNoAllocation {
class GCTracer BASE_EMBEDDED {
public:
- GCTracer();
+ // Time spent while in the external scope counts towards the
+ // external time in the tracer and will be reported separately.
+ class ExternalScope BASE_EMBEDDED {
+ public:
+ explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) {
+ start_time_ = OS::TimeCurrentMillis();
+ }
+ ~ExternalScope() {
+ tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_;
+ }
+
+ private:
+ GCTracer* tracer_;
+ double start_time_;
+ };
+ GCTracer();
~GCTracer();
// Sets the collector.
@@ -1553,6 +1586,9 @@ class GCTracer BASE_EMBEDDED {
double start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
+ // Keep track of the amount of time spent in external callbacks.
+ double external_time_;
+
// A count (including this one, eg, the first collection is 1) of the
// number of garbage collections.
int gc_count_;
@@ -1608,6 +1644,7 @@ class TranscendentalCache {
if (e.in[0] == c.integers[0] &&
e.in[1] == c.integers[1]) {
ASSERT(e.output != NULL);
+ Counters::transcendental_cache_hit.Increment();
return e.output;
}
double answer = Calculate(input);
@@ -1617,6 +1654,7 @@ class TranscendentalCache {
elements_[hash].in[1] = c.integers[1];
elements_[hash].output = heap_number;
}
+ Counters::transcendental_cache_miss.Increment();
return heap_number;
}
@@ -1657,6 +1695,17 @@ class TranscendentalCache {
hash ^= hash >> 8;
return (hash & (kCacheSize - 1));
}
+
+ static Address cache_array_address() {
+ // Used to create an external reference.
+ return reinterpret_cast<Address>(caches_);
+ }
+
+ // Allow access to the caches_ array as an ExternalReference.
+ friend class ExternalReference;
+ // Inline implementation of the caching.
+ friend class TranscendentalCacheStub;
+
static TranscendentalCache* caches_[kNumberOfCaches];
Element elements_[kCacheSize];
Type type_;
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index ffcefe0b56..f13556bd7a 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -753,6 +753,13 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
}
+void Assembler::cld() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFC);
+}
+
+
void Assembler::rep_movs() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -761,6 +768,14 @@ void Assembler::rep_movs() {
}
+void Assembler::rep_stos() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0xAB);
+}
+
+
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1637,6 +1652,13 @@ void Assembler::fld(int i) {
}
+void Assembler::fstp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xD8, i);
+}
+
+
void Assembler::fld1() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1645,6 +1667,14 @@ void Assembler::fld1() {
}
+void Assembler::fldpi() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xEB);
+}
+
+
void Assembler::fldz() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1685,6 +1715,14 @@ void Assembler::fstp_d(const Operand& adr) {
}
+void Assembler::fst_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDD);
+ emit_operand(edx, adr);
+}
+
+
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2012,6 +2050,17 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
}
+void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x5A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2067,6 +2116,16 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2078,6 +2137,17 @@ void Assembler::comisd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x2E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2157,6 +2227,50 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::movsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x10);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x6E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pxor(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xEF);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ptest(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x38);
+ EMIT(0x17);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 3d7af82ad0..4497e2aa70 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -93,7 +93,7 @@ const Register no_reg = { -1 };
struct XMMRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 2; } // currently
+ bool is_valid() const { return 0 <= code_ && code_ < 8; }
int code() const {
ASSERT(is_valid());
return code_;
@@ -542,8 +542,12 @@ class Assembler : public Malloced {
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
+ // Flag management.
+ void cld();
+
// Repetitive string instructions.
void rep_movs();
+ void rep_stos();
// Exchange two registers
void xchg(Register dst, Register src);
@@ -668,6 +672,7 @@ class Assembler : public Malloced {
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
void call(const Operand& adr);
+ void call(const ExternalReference& target);
void call(Handle<Code> code, RelocInfo::Mode rmode);
// Jumps
@@ -683,15 +688,18 @@ class Assembler : public Malloced {
// Floating-point operations
void fld(int i);
+ void fstp(int i);
void fld1();
void fldz();
+ void fldpi();
void fld_s(const Operand& adr);
void fld_d(const Operand& adr);
void fstp_s(const Operand& adr);
void fstp_d(const Operand& adr);
+ void fst_d(const Operand& adr);
void fild_s(const Operand& adr);
void fild_d(const Operand& adr);
@@ -750,14 +758,17 @@ class Assembler : public Malloced {
void cvttsd2si(Register dst, const Operand& src);
void cvtsi2sd(XMMRegister dst, const Operand& src);
+ void cvtss2sd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void sqrtsd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
@@ -768,6 +779,12 @@ class Assembler : public Malloced {
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
+ void movd(XMMRegister dst, const Operand& src);
+ void movsd(XMMRegister dst, XMMRegister src);
+
+ void pxor(XMMRegister dst, XMMRegister src);
+ void ptest(XMMRegister dst, XMMRegister src);
+
// Debugging
void Print();
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 54ef382a39..80e421bccd 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -63,10 +63,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToRuntime expects eax to contain the number of arguments
+ // JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
__ add(Operand(eax), Immediate(num_extra_args + 1));
- __ JumpToRuntime(ExternalReference(id));
+ __ JumpToExternalReference(ExternalReference(id));
}
@@ -797,38 +797,23 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
+ ASSERT(scratch.is(edi)); // rep stos destination
+ ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
// Load the initial map from the array function.
__ mov(elements_array,
FieldOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
- // Check whether an empty sized array is requested.
- __ test(array_size, Operand(array_size));
- __ j(not_zero, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
-
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
- __ bind(&not_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
times_half_pointer_size, // array_size is a smi.
@@ -845,7 +830,6 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
- __ bind(&allocated);
__ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ mov(elements_array, Factory::empty_fixed_array());
__ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
@@ -869,15 +853,6 @@ static void AllocateJSArray(MacroAssembler* masm,
__ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
- Label not_empty_2, fill_array;
- __ test(array_size, Operand(array_size));
- __ j(not_zero, &not_empty_2);
- // Length of the FixedArray is the number of pre-allocated elements even
- // though the actual JSArray has length 0.
- __ mov(FieldOperand(elements_array, Array::kLengthOffset),
- Immediate(kPreallocatedArrayElements));
- __ jmp(&fill_array);
- __ bind(&not_empty_2);
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
@@ -885,20 +860,18 @@ static void AllocateJSArray(MacroAssembler* masm,
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
- // elements_array_end: start of next object
- __ bind(&fill_array);
if (fill_with_hole) {
- Label loop, entry;
- __ mov(scratch, Factory::the_hole_value());
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(elements_array, 0), scratch);
- __ add(Operand(elements_array), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(elements_array, Operand(elements_array_end));
- __ j(below, &loop);
+ __ lea(edi, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ __ push(eax);
+ __ mov(eax, Factory::the_hole_value());
+
+ __ cld();
+ __ rep_stos();
+
+ // Restore saved registers.
+ __ pop(eax);
}
}
@@ -920,7 +893,8 @@ static void AllocateJSArray(MacroAssembler* masm,
static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
+ Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
+ empty_array, not_empty_array;
// Push the constructor and argc. No need to tag argc as a smi, as there will
// be no garbage collection with this on the stack.
@@ -936,6 +910,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ test(eax, Operand(eax));
__ j(not_zero, &argc_one_or_more);
+ __ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
edi,
@@ -958,30 +933,46 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ cmp(eax, 1);
__ j(not_equal, &argc_two_or_more);
ASSERT(kSmiTag == 0);
- __ test(Operand(esp, (push_count + 1) * kPointerSize),
- Immediate(kIntptrSignBit | kSmiTagMask));
+ __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &not_empty_array);
+
+ // The single argument passed is zero, so we jump to the code above used to
+ // handle the case of no arguments passed. To adapt the stack for that we move
+ // the return address and the pushed constructor (if pushed) one stack slot up
+ // thereby removing the passed argument. Argc is also on the stack - at the
+ // bottom - and it needs to be changed from 1 to 0 to have the call into the
+ // runtime system work in case a GC is required.
+ for (int i = push_count; i > 0; i--) {
+ __ mov(eax, Operand(esp, i * kPointerSize));
+ __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
+ }
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
+ __ push(Immediate(0)); // Treat this as a call with argc of zero.
+ __ jmp(&empty_array);
+
+ __ bind(&not_empty_array);
+ __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
__ j(not_zero, &prepare_generic_code_call);
// Handle construction of an empty array of a certain size. Get the size from
// the stack and bail out if size is to large to actually allocate an elements
// array.
- __ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
- ASSERT(kSmiTag == 0);
- __ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
+ __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
__ j(greater_equal, &prepare_generic_code_call);
// edx: array_size (smi)
// edi: constructor
- // esp[0]: argc
+ // esp[0]: argc (cannot be 0 here)
// esp[4]: constructor (only if construct_call)
// esp[8]: return address
// esp[C]: argument
AllocateJSArray(masm,
edi,
- edx,
+ ecx,
eax,
ebx,
- ecx,
+ edx,
edi,
true,
&prepare_generic_code_call);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index bd2a994083..bdd1c4a3f4 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -126,6 +126,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(info->function());
+ Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
// Initialize state.
info_ = info;
@@ -732,7 +733,27 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
Result value = frame_->Pop();
value.ToRegister();
- if (value.is_number()) {
+ if (value.is_integer32()) { // Also takes Smi case.
+ Comment cmnt(masm_, "ONLY_INTEGER_32");
+ if (FLAG_debug_code) {
+ Label ok;
+ __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ __ j(zero, &ok);
+ __ fldz();
+ __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(not_zero, &ok);
+ __ Abort("Smi was wrapped in HeapNumber in output from bitop");
+ __ bind(&ok);
+ }
+ // In the integer32 case there are no Smis hidden in heap numbers, so we
+ // need only test for Smi zero.
+ __ test(value.reg(), Operand(value.reg()));
+ dest->false_target()->Branch(zero);
+ value.Unuse();
+ dest->Split(not_zero);
+ } else if (value.is_number()) {
Comment cmnt(masm_, "ONLY_NUMBER");
// Fast case if NumberInfo indicates only numbers.
if (FLAG_debug_code) {
@@ -816,8 +837,17 @@ class FloatingPointHelper : public AllStatic {
// Takes the operands in edx and eax and loads them as integers in eax
// and ecx.
static void LoadAsIntegers(MacroAssembler* masm,
+ NumberInfo number_info,
bool use_sse3,
Label* operand_conversion_failure);
+ static void LoadNumbersAsIntegers(MacroAssembler* masm,
+ NumberInfo number_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+
// Test if operands are smis or heap numbers and load them
// into xmm0 and xmm1 if they are. Operands are in edx and eax.
// Leaves operands unchanged.
@@ -849,13 +879,14 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
- NumberInfo::ToString(operands_type_));
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
@@ -867,8 +898,11 @@ class DeferredInlineBinaryOperation: public DeferredCode {
Register dst,
Register left,
Register right,
+ NumberInfo left_info,
+ NumberInfo right_info,
OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+ : op_(op), dst_(dst), left_(left), right_(right),
+ left_info_(left_info), right_info_(right_info), mode_(mode) {
set_comment("[ DeferredInlineBinaryOperation");
}
@@ -879,6 +913,8 @@ class DeferredInlineBinaryOperation: public DeferredCode {
Register dst_;
Register left_;
Register right_;
+ NumberInfo left_info_;
+ NumberInfo right_info_;
OverwriteMode mode_;
};
@@ -892,18 +928,22 @@ void DeferredInlineBinaryOperation::Generate() {
CpuFeatures::Scope use_sse2(SSE2);
Label call_runtime, after_alloc_failure;
Label left_smi, right_smi, load_right, do_op;
- __ test(left_, Immediate(kSmiTagMask));
- __ j(zero, &left_smi);
- __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(dst_, left_);
- }
- __ jmp(&load_right);
+ if (!left_info_.IsSmi()) {
+ __ test(left_, Immediate(kSmiTagMask));
+ __ j(zero, &left_smi);
+ if (!left_info_.IsNumber()) {
+ __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, &call_runtime);
+ }
+ __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_LEFT) {
+ __ mov(dst_, left_);
+ }
+ __ jmp(&load_right);
- __ bind(&left_smi);
+ __ bind(&left_smi);
+ }
__ SmiUntag(left_);
__ cvtsi2sd(xmm0, Operand(left_));
__ SmiTag(left_);
@@ -915,23 +955,27 @@ void DeferredInlineBinaryOperation::Generate() {
}
__ bind(&load_right);
- __ test(right_, Immediate(kSmiTagMask));
- __ j(zero, &right_smi);
- __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
- __ jmp(&do_op);
+ if (!right_info_.IsSmi()) {
+ __ test(right_, Immediate(kSmiTagMask));
+ __ j(zero, &right_smi);
+ if (!right_info_.IsNumber()) {
+ __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, &call_runtime);
+ }
+ __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ mov(dst_, right_);
+ } else if (mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ push(left_);
+ __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+ __ pop(left_);
+ }
+ __ jmp(&do_op);
- __ bind(&right_smi);
+ __ bind(&right_smi);
+ }
__ SmiUntag(right_);
__ cvtsi2sd(xmm1, Operand(right_));
__ SmiTag(right_);
@@ -957,13 +1001,105 @@ void DeferredInlineBinaryOperation::Generate() {
__ pop(left_);
__ bind(&call_runtime);
}
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ GenericBinaryOpStub stub(op_,
+ mode_,
+ NO_SMI_CODE_IN_STUB,
+ NumberInfo::Combine(left_info_, right_info_));
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
__ bind(&done);
}
+static NumberInfo CalculateNumberInfo(NumberInfo operands_type,
+ Token::Value op,
+ const Result& right,
+ const Result& left) {
+ // Set NumberInfo of result according to the operation performed.
+ // Rely on the fact that smis have a 31 bit payload on ia32.
+ ASSERT(kSmiValueSize == 31);
+ switch (op) {
+ case Token::COMMA:
+ return right.number_info();
+ case Token::OR:
+ case Token::AND:
+ // Result type can be either of the two input types.
+ return operands_type;
+ case Token::BIT_AND: {
+ // Anding with positive Smis will give you a Smi.
+ if (right.is_constant() && right.handle()->IsSmi() &&
+ Smi::cast(*right.handle())->value() >= 0) {
+ return NumberInfo::Smi();
+ } else if (left.is_constant() && left.handle()->IsSmi() &&
+ Smi::cast(*left.handle())->value() >= 0) {
+ return NumberInfo::Smi();
+ }
+ return (operands_type.IsSmi())
+ ? NumberInfo::Smi()
+ : NumberInfo::Integer32();
+ }
+ case Token::BIT_OR: {
+ // Oring with negative Smis will give you a Smi.
+ if (right.is_constant() && right.handle()->IsSmi() &&
+ Smi::cast(*right.handle())->value() < 0) {
+ return NumberInfo::Smi();
+ } else if (left.is_constant() && left.handle()->IsSmi() &&
+ Smi::cast(*left.handle())->value() < 0) {
+ return NumberInfo::Smi();
+ }
+ return (operands_type.IsSmi())
+ ? NumberInfo::Smi()
+ : NumberInfo::Integer32();
+ }
+ case Token::BIT_XOR:
+ // Result is always a 32 bit integer. Smi property of inputs is preserved.
+ return (operands_type.IsSmi())
+ ? NumberInfo::Smi()
+ : NumberInfo::Integer32();
+ case Token::SAR:
+ if (left.is_smi()) return NumberInfo::Smi();
+ // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
+ return (right.is_constant() && right.handle()->IsSmi()
+ && Smi::cast(*right.handle())->value() >= 1)
+ ? NumberInfo::Smi()
+ : NumberInfo::Integer32();
+ case Token::SHR:
+ // Result is a smi if we shift by a constant >= 2, otherwise an integer32.
+ return (right.is_constant() && right.handle()->IsSmi()
+ && Smi::cast(*right.handle())->value() >= 2)
+ ? NumberInfo::Smi()
+ : NumberInfo::Integer32();
+ case Token::ADD:
+ if (operands_type.IsSmi()) {
+ // The Integer32 range is big enough to take the sum of any two Smis.
+ return NumberInfo::Integer32();
+ } else {
+ // Result could be a string or a number. Check types of inputs.
+ return operands_type.IsNumber()
+ ? NumberInfo::Number()
+ : NumberInfo::Unknown();
+ }
+ case Token::SHL:
+ return NumberInfo::Integer32();
+ case Token::SUB:
+ // The Integer32 range is big enough to take the difference of any two
+ // Smis.
+ return (operands_type.IsSmi()) ?
+ NumberInfo::Integer32() :
+ NumberInfo::Number();
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Result is always a number.
+ return NumberInfo::Number();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return NumberInfo::Unknown();
+}
+
+
void CodeGenerator::GenericBinaryOperation(Token::Value op,
StaticType* type,
OverwriteMode overwrite_mode) {
@@ -1019,9 +1155,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
}
// Get number type of left and right sub-expressions.
- NumberInfo::Type operands_type =
+ NumberInfo operands_type =
NumberInfo::Combine(left.number_info(), right.number_info());
+ NumberInfo result_type = CalculateNumberInfo(operands_type, op, right, left);
+
Result answer;
if (left_is_non_smi_constant || right_is_non_smi_constant) {
// Go straight to the slow case, with no smi code.
@@ -1042,7 +1180,10 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// generate the inline Smi check code if this operation is part of a loop.
// For all other operations only inline the Smi check code for likely smis
// if the operation is part of a loop.
- if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ if (loop_nesting() > 0 &&
+ (Token::IsBitOp(op) ||
+ operands_type.IsInteger32() ||
+ type->IsLikelySmi())) {
answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
GenericBinaryOpStub stub(op,
@@ -1053,58 +1194,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
}
}
- // Set NumberInfo of result according to the operation performed.
- // Rely on the fact that smis have a 31 bit payload on ia32.
- ASSERT(kSmiValueSize == 31);
- NumberInfo::Type result_type = NumberInfo::kUnknown;
- switch (op) {
- case Token::COMMA:
- result_type = right.number_info();
- break;
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- result_type = operands_type;
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a number. Smi property of inputs is preserved.
- result_type = (operands_type == NumberInfo::kSmi)
- ? NumberInfo::kSmi
- : NumberInfo::kNumber;
- break;
- case Token::SAR:
- // Result is a smi if we shift by a constant >= 1, otherwise a number.
- result_type = (right.is_constant() && right.handle()->IsSmi()
- && Smi::cast(*right.handle())->value() >= 1)
- ? NumberInfo::kSmi
- : NumberInfo::kNumber;
- break;
- case Token::SHR:
- // Result is a smi if we shift by a constant >= 2, otherwise a number.
- result_type = (right.is_constant() && right.handle()->IsSmi()
- && Smi::cast(*right.handle())->value() >= 2)
- ? NumberInfo::kSmi
- : NumberInfo::kNumber;
- break;
- case Token::ADD:
- // Result could be a string or a number. Check types of inputs.
- result_type = NumberInfo::IsNumber(operands_type)
- ? NumberInfo::kNumber
- : NumberInfo::kUnknown;
- break;
- case Token::SHL:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- result_type = NumberInfo::kNumber;
- break;
- default:
- UNREACHABLE();
- }
answer.set_number_info(result_type);
frame_->Push(&answer);
}
@@ -1191,6 +1280,12 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
}
+static void CheckTwoForSminess(MacroAssembler* masm,
+ Register left, Register right, Register scratch,
+ NumberInfo left_info, NumberInfo right_info,
+ DeferredInlineBinaryOperation* deferred);
+
+
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
@@ -1271,6 +1366,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
(op == Token::DIV) ? eax : edx,
left->reg(),
right->reg(),
+ left->number_info(),
+ right->number_info(),
overwrite_mode);
if (left->reg().is(right->reg())) {
__ test(left->reg(), Immediate(kSmiTagMask));
@@ -1368,11 +1465,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
answer.reg(),
left->reg(),
ecx,
+ left->number_info(),
+ right->number_info(),
overwrite_mode);
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(ecx));
- __ test(answer.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
+ left->number_info(), right->number_info(), deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
@@ -1442,16 +1539,12 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
answer.reg(),
left->reg(),
right->reg(),
+ left->number_info(),
+ right->number_info(),
overwrite_mode);
- if (left->reg().is(right->reg())) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- } else {
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(right->reg()));
- ASSERT(kSmiTag == 0); // Adjust test if not the case.
- __ test(answer.reg(), Immediate(kSmiTagMask));
- }
- deferred->Branch(not_zero);
+ CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
+ left->number_info(), right->number_info(), deferred);
+
__ mov(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
@@ -1520,13 +1613,16 @@ class DeferredInlineSmiOperation: public DeferredCode {
DeferredInlineSmiOperation(Token::Value op,
Register dst,
Register src,
+ NumberInfo number_info,
Smi* value,
OverwriteMode overwrite_mode)
: op_(op),
dst_(dst),
src_(src),
+ number_info_(number_info),
value_(value),
overwrite_mode_(overwrite_mode) {
+ if (number_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
set_comment("[ DeferredInlineSmiOperation");
}
@@ -1536,6 +1632,7 @@ class DeferredInlineSmiOperation: public DeferredCode {
Token::Value op_;
Register dst_;
Register src_;
+ NumberInfo number_info_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1546,7 +1643,8 @@ void DeferredInlineSmiOperation::Generate() {
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
+ NumberInfo::Combine(NumberInfo::Smi(), number_info_));
stub.GenerateCall(masm_, src_, value_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1560,9 +1658,11 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
Register dst,
Smi* value,
Register src,
+ NumberInfo number_info,
OverwriteMode overwrite_mode)
: op_(op),
dst_(dst),
+ number_info_(number_info),
value_(value),
src_(src),
overwrite_mode_(overwrite_mode) {
@@ -1574,6 +1674,7 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
private:
Token::Value op_;
Register dst_;
+ NumberInfo number_info_;
Smi* value_;
Register src_;
OverwriteMode overwrite_mode_;
@@ -1581,7 +1682,11 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ GenericBinaryOpStub igostub(
+ op_,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB,
+ NumberInfo::Combine(NumberInfo::Smi(), number_info_));
igostub.GenerateCall(masm_, value_, src_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1593,9 +1698,14 @@ void DeferredInlineSmiOperationReversed::Generate() {
class DeferredInlineSmiAdd: public DeferredCode {
public:
DeferredInlineSmiAdd(Register dst,
+ NumberInfo number_info,
Smi* value,
OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ : dst_(dst),
+ number_info_(number_info),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ if (number_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
set_comment("[ DeferredInlineSmiAdd");
}
@@ -1603,6 +1713,7 @@ class DeferredInlineSmiAdd: public DeferredCode {
private:
Register dst_;
+ NumberInfo number_info_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1611,7 +1722,11 @@ class DeferredInlineSmiAdd: public DeferredCode {
void DeferredInlineSmiAdd::Generate() {
// Undo the optimistic add operation and call the shared stub.
__ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ GenericBinaryOpStub igostub(
+ Token::ADD,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB,
+ NumberInfo::Combine(NumberInfo::Smi(), number_info_));
igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1623,9 +1738,13 @@ void DeferredInlineSmiAdd::Generate() {
class DeferredInlineSmiAddReversed: public DeferredCode {
public:
DeferredInlineSmiAddReversed(Register dst,
+ NumberInfo number_info,
Smi* value,
OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ : dst_(dst),
+ number_info_(number_info),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAddReversed");
}
@@ -1633,6 +1752,7 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
private:
Register dst_;
+ NumberInfo number_info_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1641,7 +1761,11 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
void DeferredInlineSmiAddReversed::Generate() {
// Undo the optimistic add operation and call the shared stub.
__ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ GenericBinaryOpStub igostub(
+ Token::ADD,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB,
+ NumberInfo::Combine(NumberInfo::Smi(), number_info_));
igostub.GenerateCall(masm_, value_, dst_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1654,9 +1778,14 @@ void DeferredInlineSmiAddReversed::Generate() {
class DeferredInlineSmiSub: public DeferredCode {
public:
DeferredInlineSmiSub(Register dst,
+ NumberInfo number_info,
Smi* value,
OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ : dst_(dst),
+ number_info_(number_info),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ if (number_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
set_comment("[ DeferredInlineSmiSub");
}
@@ -1664,6 +1793,7 @@ class DeferredInlineSmiSub: public DeferredCode {
private:
Register dst_;
+ NumberInfo number_info_;
Smi* value_;
OverwriteMode overwrite_mode_;
};
@@ -1672,7 +1802,11 @@ class DeferredInlineSmiSub: public DeferredCode {
void DeferredInlineSmiSub::Generate() {
// Undo the optimistic sub operation and call the shared stub.
__ add(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ GenericBinaryOpStub igostub(
+ Token::SUB,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB,
+ NumberInfo::Combine(NumberInfo::Smi(), number_info_));
igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1716,17 +1850,21 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
DeferredCode* deferred = NULL;
if (reversed) {
deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
} else {
deferred = new DeferredInlineSmiAdd(operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
}
__ add(Operand(operand->reg()), Immediate(value));
deferred->Branch(overflow);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ if (!operand->number_info().IsSmi()) {
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
deferred->BindExit();
answer = *operand;
break;
@@ -1741,24 +1879,29 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
answer = allocator()->Allocate();
ASSERT(answer.is_valid());
__ Set(answer.reg(), Immediate(value));
- deferred = new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
+ deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ operand->reg(),
+ operand->number_info(),
+ overwrite_mode);
__ sub(answer.reg(), Operand(operand->reg()));
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
answer = *operand;
deferred = new DeferredInlineSmiSub(operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
}
deferred->Branch(overflow);
- __ test(answer.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ if (!operand->number_info().IsSmi()) {
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
deferred->BindExit();
operand->Unuse();
break;
@@ -1775,19 +1918,27 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
int shift_value = int_value & 0x1f;
operand->ToRegister();
frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
+ if (!operand->number_info().IsSmi()) {
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ operand->number_info(),
+ smi_value,
+ overwrite_mode);
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ if (shift_value > 0) {
+ __ sar(operand->reg(), shift_value);
+ __ and_(operand->reg(), ~kSmiTagMask);
+ }
+ deferred->BindExit();
+ } else {
+ if (shift_value > 0) {
+ __ sar(operand->reg(), shift_value);
+ __ and_(operand->reg(), ~kSmiTagMask);
+ }
}
- deferred->BindExit();
answer = *operand;
}
break;
@@ -1808,10 +1959,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
new DeferredInlineSmiOperation(op,
answer.reg(),
operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ if (!operand->number_info().IsSmi()) {
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
__ mov(answer.reg(), operand->reg());
__ SmiUntag(answer.reg());
__ shr(answer.reg(), shift_value);
@@ -1853,10 +2007,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
answer.reg(),
smi_value,
right.reg(),
+ right.number_info(),
overwrite_mode);
__ mov(answer.reg(), Immediate(int_value));
__ sar(ecx, kSmiTagSize);
- deferred->Branch(carry);
+ if (!right.number_info().IsSmi()) {
+ deferred->Branch(carry);
+ }
__ shl_cl(answer.reg());
__ cmp(answer.reg(), 0xc0000000);
deferred->Branch(sign);
@@ -1875,6 +2032,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
__ test(operand->reg(), Immediate(kSmiTagMask));
@@ -1889,10 +2047,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
new DeferredInlineSmiOperation(op,
answer.reg(),
operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ if (!operand->number_info().IsSmi()) {
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1.
@@ -1916,20 +2077,25 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
frame_->Spill(operand->reg());
DeferredCode* deferred = NULL;
if (reversed) {
- deferred = new DeferredInlineSmiOperationReversed(op,
- operand->reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
+ deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ operand->reg(),
+ smi_value,
+ operand->reg(),
+ operand->number_info(),
+ overwrite_mode);
} else {
deferred = new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
}
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ if (!operand->number_info().IsSmi()) {
+ __ test(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
if (op == Token::BIT_AND) {
__ and_(Operand(operand->reg()), Immediate(value));
} else if (op == Token::BIT_XOR) {
@@ -1956,6 +2122,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
+ operand->number_info(),
smi_value,
overwrite_mode);
// Check that lowest log2(value) bits of operand are zero, and test
@@ -1987,11 +2154,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
(IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
operand->ToRegister();
frame_->Spill(operand->reg());
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ operand->number_info(),
+ smi_value,
+ overwrite_mode);
// Check for negative or non-Smi left hand side.
__ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
deferred->Branch(not_zero);
@@ -2025,6 +2194,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
static bool CouldBeNaN(const Result& result) {
+ if (result.number_info().IsSmi()) return false;
+ if (result.number_info().IsInteger32()) return false;
if (!result.is_constant()) return true;
if (!result.handle()->IsHeapNumber()) return false;
return isnan(HeapNumber::cast(*result.handle())->value());
@@ -5420,6 +5591,54 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateCharFromCode");
+ ASSERT(args->length() == 1);
+
+ Load(args->at(0));
+ Result code = frame_->Pop();
+ code.ToRegister();
+ ASSERT(code.is_valid());
+
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+
+ JumpTarget slow_case;
+ JumpTarget exit;
+
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ test(code.reg(),
+ Immediate(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ slow_case.Branch(not_zero, &code, not_taken);
+
+ __ Set(temp.reg(), Immediate(Factory::single_character_string_cache()));
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize == 1);
+ ASSERT(kSmiShiftSize == 0);
+ // At this point code register contains smi tagged ascii char code.
+ __ mov(temp.reg(), FieldOperand(temp.reg(),
+ code.reg(), times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(temp.reg(), Factory::undefined_value());
+ slow_case.Branch(equal, &code, not_taken);
+ code.Unuse();
+
+ frame_->Push(&temp);
+ exit.Jump();
+
+ slow_case.Bind(&code);
+ frame_->Push(&code);
+ Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
+ frame_->Push(&result);
+
+ exit.Bind();
+}
+
+
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -5740,21 +5959,12 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
frame_->SpillAll();
- // Make sure the frame is aligned like the OS expects.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- __ mov(edi, Operand(esp)); // Save in callee-saved register.
- __ and_(esp, -kFrameAlignment);
- }
+ static const int num_arguments = 0;
+ __ PrepareCallCFunction(num_arguments, eax);
// Call V8::RandomPositiveSmi().
- __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
-
- // Restore stack pointer from callee-saved register edi.
- if (kFrameAlignment > 0) {
- __ mov(esp, Operand(edi));
- }
+ __ CallCFunction(ExternalReference::random_positive_smi_function(),
+ num_arguments);
Result result = allocator_->Allocate(eax);
frame_->Push(&result);
@@ -5823,6 +6033,269 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
+// Generates the Math.pow method - only handles special cases and branches to
+// the runtime system if not.Please note - this function assumes that
+// the callsite has executed ToNumber on both arguments and that the
+// arguments are not the same identifier.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ Load(args->at(0));
+ Load(args->at(1));
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
+ frame_->Push(&res);
+ } else {
+ CpuFeatures::Scope use_sse2(SSE2);
+ Label allocate_return;
+ // Load the two operands while leaving the values on the frame.
+ frame()->Dup();
+ Result exponent = frame()->Pop();
+ exponent.ToRegister();
+ frame()->Spill(exponent.reg());
+ frame()->PushElementAt(1);
+ Result base = frame()->Pop();
+ base.ToRegister();
+ frame()->Spill(base.reg());
+
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ // We can safely assume that the base and exponent is not in the same
+ // register since we only call this from one callsite (math.js).
+ ASSERT(!exponent.reg().is(base.reg()));
+ JumpTarget call_runtime;
+
+ // Save 1 in xmm3 - we need this several times later on.
+ __ mov(answer.reg(), Immediate(1));
+ __ cvtsi2sd(xmm3, Operand(answer.reg()));
+
+ Label exponent_nonsmi;
+ Label base_nonsmi;
+ // If the exponent is a heap number go to that specific case.
+ __ test(exponent.reg(), Immediate(kSmiTagMask));
+ __ j(not_zero, &exponent_nonsmi);
+ __ test(base.reg(), Immediate(kSmiTagMask));
+ __ j(not_zero, &base_nonsmi);
+
+ // Optimized version when y is an integer.
+ Label powi;
+ __ SmiUntag(base.reg());
+ __ cvtsi2sd(xmm0, Operand(base.reg()));
+ __ jmp(&powi);
+ // exponent is smi and base is a heapnumber.
+ __ bind(&base_nonsmi);
+ __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ call_runtime.Branch(not_equal);
+
+ __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+ // Optimized version of pow if y is an integer.
+ __ bind(&powi);
+ __ SmiUntag(exponent.reg());
+
+ // Save exponent in base as we need to check if exponent is negative later.
+ // We know that base and exponent are in different registers.
+ __ mov(base.reg(), exponent.reg());
+
+ // Get absolute value of exponent.
+ Label no_neg;
+ __ cmp(exponent.reg(), 0);
+ __ j(greater_equal, &no_neg);
+ __ neg(exponent.reg());
+ __ bind(&no_neg);
+
+ // Load xmm1 with 1.
+ __ movsd(xmm1, xmm3);
+ Label while_true;
+ Label no_multiply;
+
+ // Label allocate_and_return;
+ __ bind(&while_true);
+ __ shr(exponent.reg(), 1);
+ __ j(not_carry, &no_multiply);
+ __ mulsd(xmm1, xmm0);
+ __ bind(&no_multiply);
+ __ test(exponent.reg(), Operand(exponent.reg()));
+ __ mulsd(xmm0, xmm0);
+ __ j(not_zero, &while_true);
+
+ // x has the original value of y - if y is negative return 1/result.
+ __ test(base.reg(), Operand(base.reg()));
+ __ j(positive, &allocate_return);
+ // Special case if xmm1 has reached infinity.
+ __ mov(answer.reg(), Immediate(0x7FB00000));
+ __ movd(xmm0, Operand(answer.reg()));
+ __ cvtss2sd(xmm0, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ call_runtime.Branch(equal);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ jmp(&allocate_return);
+
+ // exponent (or both) is a heapnumber - no matter what we should now work
+ // on doubles.
+ __ bind(&exponent_nonsmi);
+ __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ call_runtime.Branch(not_equal);
+ __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+ // Test if exponent is nan.
+ __ ucomisd(xmm1, xmm1);
+ call_runtime.Branch(parity_even);
+
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ test(base.reg(), Immediate(kSmiTagMask));
+ __ j(not_zero, &base_not_smi);
+ __ SmiUntag(base.reg());
+ __ cvtsi2sd(xmm0, Operand(base.reg()));
+ __ jmp(&handle_special_cases);
+ __ bind(&base_not_smi);
+ __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ call_runtime.Branch(not_equal);
+ __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+ __ and_(answer.reg(), HeapNumber::kExponentMask);
+ __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ call_runtime.Branch(greater_equal);
+ __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ Label not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ mov(answer.reg(), Immediate(0xBF000000));
+ __ movd(xmm2, Operand(answer.reg()));
+ __ cvtss2sd(xmm2, xmm2);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
+
+ // Calculates reciprocal of square root.
+ // Note that 1/sqrt(x) = sqrt(1/x))
+ __ divsd(xmm3, xmm0);
+ __ movsd(xmm1, xmm3);
+ __ sqrtsd(xmm1, xmm1);
+ __ jmp(&allocate_return);
+
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ comisd(xmm2, xmm1);
+ call_runtime.Branch(not_equal);
+ // Calculates square root.
+ __ movsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
+
+ JumpTarget done;
+ Label failure, success;
+ __ bind(&allocate_return);
+ // Make a copy of the frame to enable us to handle allocation
+ // failure after the JumpTarget jump.
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(answer.reg(), exponent.reg(),
+ base.reg(), &failure);
+ __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+ // Remove the two original values from the frame - we only need those
+ // in the case where we branch to runtime.
+ frame()->Drop(2);
+ exponent.Unuse();
+ base.Unuse();
+ done.Jump(&answer);
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ // If we experience an allocation failure we branch to runtime.
+ __ bind(&failure);
+ call_runtime.Bind();
+ answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+
+ done.Bind(&answer);
+ frame()->Push(&answer);
+ }
+}
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
+
+
+// Generates the Math.sqrt method. Please note - this function assumes that
+// the callsite has executed ToNumber on the argument.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+ frame()->Push(&result);
+ } else {
+ CpuFeatures::Scope use_sse2(SSE2);
+ // Leave original value on the frame if we need to call runtime.
+ frame()->Dup();
+ Result result = frame()->Pop();
+ result.ToRegister();
+ frame()->Spill(result.reg());
+ Label runtime;
+ Label non_smi;
+ Label load_done;
+ JumpTarget end;
+
+ __ test(result.reg(), Immediate(kSmiTagMask));
+ __ j(not_zero, &non_smi);
+ __ SmiUntag(result.reg());
+ __ cvtsi2sd(xmm0, Operand(result.reg()));
+ __ jmp(&load_done);
+ __ bind(&non_smi);
+ __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, &runtime);
+ __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+
+ __ bind(&load_done);
+ __ sqrtsd(xmm0, xmm0);
+ // A copy of the virtual frame to allow us to go to runtime after the
+ // JumpTarget jump.
+ Result scratch = allocator()->Allocate();
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
+
+ __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+ frame()->Drop(1);
+ scratch.Unuse();
+ end.Jump(&result);
+ // We only branch to runtime if we have an allocation error.
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ __ bind(&runtime);
+ result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+
+ end.Bind(&result);
+ frame()->Push(&result);
+ }
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -6682,8 +7155,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
DeferredReferenceSetKeyedValue(Register value,
Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
+ Register receiver,
+ Register scratch)
+ : value_(value),
+ key_(key),
+ receiver_(receiver),
+ scratch_(scratch) {
set_comment("[ DeferredReferenceSetKeyedValue");
}
@@ -6695,17 +7172,65 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
Register value_;
Register key_;
Register receiver_;
+ Register scratch_;
Label patch_site_;
};
void DeferredReferenceSetKeyedValue::Generate() {
__ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Push receiver and key arguments on the stack.
- __ push(receiver_);
- __ push(key_);
- // Move value argument to eax as expected by the IC stub.
- if (!value_.is(eax)) __ mov(eax, value_);
+ // Move value_ to eax, key_ to ecx, and receiver_ to edx.
+ Register old_value = value_;
+
+ // First, move value to eax.
+ if (!value_.is(eax)) {
+ if (key_.is(eax)) {
+ // Move key_ out of eax, preferably to ecx.
+ if (!value_.is(ecx) && !receiver_.is(ecx)) {
+ __ mov(ecx, key_);
+ key_ = ecx;
+ } else {
+ __ mov(scratch_, key_);
+ key_ = scratch_;
+ }
+ }
+ if (receiver_.is(eax)) {
+ // Move receiver_ out of eax, preferably to edx.
+ if (!value_.is(edx) && !key_.is(edx)) {
+ __ mov(edx, receiver_);
+ receiver_ = edx;
+ } else {
+ // Both moves to scratch are from eax, also, no valid path hits both.
+ __ mov(scratch_, receiver_);
+ receiver_ = scratch_;
+ }
+ }
+ __ mov(eax, value_);
+ value_ = eax;
+ }
+
+ // Now value_ is in eax. Move the other two to the right positions.
+ // We do not update the variables key_ and receiver_ to ecx and edx.
+ if (key_.is(ecx)) {
+ if (!receiver_.is(edx)) {
+ __ mov(edx, receiver_);
+ }
+ } else if (key_.is(edx)) {
+ if (receiver_.is(ecx)) {
+ __ xchg(edx, ecx);
+ } else {
+ __ mov(ecx, key_);
+ if (!receiver_.is(edx)) {
+ __ mov(edx, receiver_);
+ }
+ }
+ } else { // Key is not in edx or ecx.
+ if (!receiver_.is(edx)) {
+ __ mov(edx, receiver_);
+ }
+ __ mov(ecx, key_);
+ }
+
// Call the IC stub.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -6718,11 +7243,8 @@ void DeferredReferenceSetKeyedValue::Generate() {
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
masm_->test(eax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC), key and receiver
- // registers.
- if (!value_.is(eax)) __ mov(value_, eax);
- __ pop(key_);
- __ pop(receiver_);
+ // Restore value (returned from store IC) register.
+ if (!old_value.is(eax)) __ mov(old_value, eax);
}
@@ -6846,8 +7368,10 @@ Result CodeGenerator::EmitKeyedLoad() {
deferred->Branch(not_equal);
// Check that the key is a smi.
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ if (!key.is_smi()) {
+ __ test(key.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
// Get the elements array from the receiver and check that it
// is not a dictionary.
@@ -6921,7 +7445,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(result.reg(),
key.reg(),
- receiver.reg());
+ receiver.reg(),
+ tmp.reg());
// Check that the value is a smi if it is not a constant. We can skip
// the write barrier for smis and constants.
@@ -6981,7 +7506,6 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// indicate that we have generated an inline version of the
// keyed store.
__ nop();
- frame()->Drop(2);
}
ASSERT(frame()->height() == original_height - 3);
return result;
@@ -6992,6 +7516,34 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
#define __ ACCESS_MASM(masm)
+static void CheckTwoForSminess(MacroAssembler* masm,
+ Register left, Register right, Register scratch,
+ NumberInfo left_info, NumberInfo right_info,
+ DeferredInlineBinaryOperation* deferred) {
+ if (left.is(right)) {
+ if (!left_info.IsSmi()) {
+ __ test(left, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+ } else if (!left_info.IsSmi()) {
+ if (!right_info.IsSmi()) {
+ __ mov(scratch, left);
+ __ or_(scratch, Operand(right));
+ __ test(scratch, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ __ test(left, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+ } else {
+ if (!right_info.IsSmi()) {
+ __ test(right, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+ }
+}
+
+
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
Property* property = expression_->AsProperty();
@@ -7123,6 +7675,7 @@ void Reference::SetValue(InitState init_state) {
Comment cmnt(masm, "[ Store to keyed Property");
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
+
Result answer = cgen_->EmitKeyedStore(property->key()->type());
cgen_->frame()->Push(&answer);
set_unloaded();
@@ -7175,7 +7728,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ push(esi);
__ push(edx);
__ push(ecx); // Restore return address.
- __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
}
@@ -7219,7 +7772,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
@@ -7274,8 +7827,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ ret(3 * kPointerSize);
__ bind(&slow_case);
- ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
- __ TailCallRuntime(runtime, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
@@ -7492,6 +8044,22 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ mov(left, Operand(esp, 2 * kPointerSize));
}
+ if (static_operands_type_.IsSmi()) {
+ if (op_ == Token::BIT_OR) {
+ __ or_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_AND) {
+ __ and_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_XOR) {
+ __ xor_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ }
+ }
+
// 2. Prepare the smi check of both operands by oring them together.
Comment smi_check_comment(masm, "-- Smi check arguments");
Label not_smis;
@@ -7800,146 +8368,181 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Generate fast case smi code if requested. This flag is set when the fast
// case smi code is not generated by the caller. Generating it here will speed
// up common operations.
- if (HasSmiCodeInStub()) {
+ if (ShouldGenerateSmiCode()) {
GenerateSmiCode(masm, &call_runtime);
} else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- GenerateLoadArguments(masm);
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
}
// Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (NumberInfo::IsNumber(operands_type_)) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx,
- "GenericBinaryOpStub operand not a number.");
- __ AbortIfNotNumber(eax,
- "GenericBinaryOpStub operand not a number.");
- }
- FloatingPointHelper::LoadSSE2Operands(masm);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
}
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (NumberInfo::IsNumber(operands_type_)) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx,
- "GenericBinaryOpStub operand not a number.");
- __ AbortIfNotNumber(eax,
- "GenericBinaryOpStub operand not a number.");
+ Label not_floats;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx,
+ "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(eax,
+ "GenericBinaryOpStub operand not a number.");
+ }
+ if (static_operands_type_.IsSmi()) {
+ FloatingPointHelper::LoadSSE2Smis(masm, ecx);
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm);
+ }
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
}
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ } else { // SSE2 not available, use FPU.
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx,
+ "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(eax,
+ "GenericBinaryOpStub operand not a number.");
+ }
+ } else {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ }
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
}
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // Try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
}
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
+ break;
}
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
}
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm,
+ static_operands_type_,
+ use_sse3_,
+ &call_runtime);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
default: UNREACHABLE();
}
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
} else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result);
}
+ // Tag smi result and return.
+ __ SmiTag(eax);
GenerateReturn(masm);
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ }
+ break;
}
- break;
+ default: UNREACHABLE(); break;
}
- default: UNREACHABLE(); break;
}
// If all else fails, use the runtime system to get the correct
@@ -7947,30 +8550,40 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// stack in the correct order below the return address.
__ bind(&call_runtime);
if (HasArgsInRegisters()) {
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
- __ push(edx);
- __ push(eax);
- }
- __ push(ecx);
+ GenerateRegisterArgsPush(masm);
}
+
switch (op_) {
case Token::ADD: {
// Test for string arguments before calling runtime.
Label not_strings, not_string1, string1, string1_smi2;
- Result answer;
- __ test(edx, Immediate(kSmiTagMask));
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in edx, eax
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+ if (HasArgsReversed()) {
+ lhs = eax;
+ rhs = edx;
+ } else {
+ lhs = edx;
+ rhs = eax;
+ }
+
+ // Test if first argument is a string.
+ __ test(lhs, Immediate(kSmiTagMask));
__ j(zero, &not_string1);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &not_string1);
// First argument is a string, test second.
- __ test(eax, Immediate(kSmiTagMask));
+ __ test(rhs, Immediate(kSmiTagMask));
__ j(zero, &string1_smi2);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &string1);
// First and second argument are strings. Jump to the string add stub.
@@ -7981,36 +8594,26 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// First argument is a string, second is a smi. Try to lookup the number
// string for the smi in the number string cache.
NumberToStringStub::GenerateLookupNumberStringCache(
- masm, eax, edi, ebx, ecx, true, &string1);
+ masm, rhs, edi, ebx, ecx, true, &string1);
- // Call the string add stub to make the result.
- __ EnterInternalFrame();
- __ push(edx); // Original first argument.
- __ push(edi); // Number to string result for second argument.
- __ CallStub(&string_add_stub);
- __ LeaveInternalFrame();
- __ ret(2 * kPointerSize);
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ TailCallStub(&string_add_stub);
+ // Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(
- HasArgsReversed() ?
- Builtins::STRING_ADD_RIGHT :
- Builtins::STRING_ADD_LEFT,
- JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
- __ test(eax, Immediate(kSmiTagMask));
+ __ test(rhs, Immediate(kSmiTagMask));
__ j(zero, &not_strings);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &not_strings);
// Only second argument is a string.
- __ InvokeBuiltin(
- HasArgsReversed() ?
- Builtins::STRING_ADD_LEFT :
- Builtins::STRING_ADD_RIGHT,
- JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
@@ -8050,6 +8653,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default:
UNREACHABLE();
}
+
+ // Generate an unreachable reference to the DEFAULT stub so that it can be
+ // found at the end of this stub when clearing ICs at GC.
+ if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+ GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+ __ TailCallStub(&uninit);
+ }
}
@@ -8103,10 +8713,9 @@ void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
- if (!HasArgsInRegisters()) {
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- }
+ ASSERT(!HasArgsInRegisters());
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
}
@@ -8121,30 +8730,310 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
}
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(ecx);
+ if (HasArgsReversed()) {
+ __ push(eax);
+ __ push(edx);
+ } else {
+ __ push(edx);
+ __ push(eax);
+ }
+ __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ // Keep a copy of operands on the stack and make sure they are also in
+ // edx, eax.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ } else {
+ GenerateLoadArguments(masm);
+ }
+
+ // Internal frame is necessary to handle exceptions properly.
+ __ EnterInternalFrame();
+
+ // Push arguments on stack if the stub expects them there.
+ if (!HasArgsInRegisters()) {
+ __ push(edx);
+ __ push(eax);
+ }
+ // Call the stub proper to get the result in eax.
+ __ call(&get_result);
+ __ LeaveInternalFrame();
+
+ __ pop(ecx); // Return address.
+ // Left and right arguments are now on top.
+ // Push the operation result. The tail call to BinaryOp_Patch will
+ // return it to the original caller.
+ __ push(eax);
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+ __ push(ecx); // Return address.
+
+ // Patch the caller to an appropriate specialized stub
+ // and return the operation result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 6,
+ 1);
+
+ // The entry point for the result calculation is assumed to be immediately
+ // after this sequence.
+ __ bind(&get_result);
+}
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ HandleScope scope;
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // esp[4]: argument (should be number).
+ // esp[0]: return address.
+ // Test that eax is a number.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the low and high words of the double into ebx, edx.
+ ASSERT_EQ(1, kSmiTagSize);
+ __ sar(eax, 1);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ mov(Operand(esp, 0), eax);
+ __ fild_s(Operand(esp, 0));
+ __ fst_d(Operand(esp, 0));
+ __ pop(edx);
+ __ pop(ebx);
+ __ jmp(&loaded);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // low and high words into ebx, edx.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+
+ __ bind(&loaded);
+ // ST[0] == double value
+ // ebx = low 32 bits of double value
+ // edx = high 32 bits of double value
+ // Compute hash:
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ mov(ecx, ebx);
+ __ xor_(ecx, Operand(edx));
+ __ mov(eax, ecx);
+ __ sar(eax, 16);
+ __ xor_(ecx, Operand(eax));
+ __ mov(eax, ecx);
+ __ sar(eax, 8);
+ __ xor_(ecx, Operand(eax));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+ // ST[0] == double value.
+ // ebx = low 32 bits of double value.
+ // edx = high 32 bits of double value.
+ // ecx = TranscendentalCache::hash(double value).
+ __ mov(eax,
+ Immediate(ExternalReference::transcendental_cache_array_address()));
+ // Eax points to cache array.
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // Eax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+ // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ lea(ecx, Operand(eax, ecx, times_4, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmp(ebx, Operand(ecx, 0));
+ __ j(not_equal, &cache_miss);
+ __ cmp(edx, Operand(ecx, kIntSize));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ mov(eax, Operand(ecx, 2 * kIntSize));
+ __ fstp(0);
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ // We are short on registers, so use no_reg as scratch.
+ // This gives slightly larger code.
+ __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+ GenerateOperation(masm);
+ __ mov(Operand(ecx, 0), ebx);
+ __ mov(Operand(ecx, kIntSize), edx);
+ __ mov(Operand(ecx, 2 * kIntSize), eax);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+ // Only free register is edi.
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ mov(edi, edx);
+ __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ int supported_exponent_limit =
+ (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
+ __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ j(below, &in_range, taken);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmp(Operand(edi), Immediate(0x7ff00000));
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, taken);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ // NaN is represented by 0x7ff8000000000000.
+ __ push(Immediate(0x7ff80000));
+ __ push(Immediate(0));
+ __ fld_d(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ mov(edi, eax); // Save eax before using fnstsw_ax.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ test(Operand(eax), Immediate(5));
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ test(Operand(eax), Immediate(0x400 /* C2 */));
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ __ fstp(0);
+ __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
+
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
+
+
// Get the integer part of a heap number. Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
// trashed registers.
void IntegerConvert(MacroAssembler* masm,
Register source,
+ NumberInfo number_info,
bool use_sse3,
Label* conversion_failure) {
ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
+ if (!number_info.IsInteger32() || !use_sse3) {
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ }
if (use_sse3) {
CpuFeatures::Scope scope(SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
+ if (!number_info.IsInteger32()) {
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ }
// Load x87 register with heap number.
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
// Reserve space for 64 bit answer.
@@ -8258,16 +9147,66 @@ void IntegerConvert(MacroAssembler* masm,
// Input: edx, eax are the left and right objects of a bit op.
// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
+ NumberInfo number_info,
+ bool use_sse3,
+ Label* conversion_failure) {
// Check float operands.
Label arg1_is_object, check_undefined_arg1;
Label arg2_is_object, check_undefined_arg2;
Label load_arg2, done;
+ if (!number_info.IsHeapNumber()) {
+ if (!number_info.IsSmi()) {
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+ }
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+ }
+
+ __ bind(&arg1_is_object);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm, edx, number_info, use_sse3, conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ if (!number_info.IsHeapNumber()) {
+ // Test if arg2 is a Smi.
+ if (!number_info.IsSmi()) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+ }
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+ }
+
+ __ bind(&arg2_is_object);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm, eax, number_info, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ // Test if arg1 is a Smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(not_zero, &arg1_is_object);
+
__ SmiUntag(edx);
__ jmp(&load_arg2);
@@ -8282,15 +9221,22 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ cmp(ebx, Factory::heap_number_map());
__ j(not_equal, &check_undefined_arg1);
+
// Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ IntegerConvert(masm,
+ edx,
+ NumberInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
__ mov(edx, ecx);
// Here edx has the untagged integer, eax has a Smi or a heap number.
__ bind(&load_arg2);
+
// Test if arg2 is a Smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &arg2_is_object);
+
__ SmiUntag(eax);
__ mov(ecx, eax);
__ jmp(&done);
@@ -8306,13 +9252,30 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(ebx, Factory::heap_number_map());
__ j(not_equal, &check_undefined_arg2);
+
// Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
+ IntegerConvert(masm,
+ eax,
+ NumberInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
__ bind(&done);
__ mov(eax, edx);
}
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ NumberInfo number_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ if (number_info.IsNumber()) {
+ LoadNumbersAsIntegers(masm, number_info, use_sse3, conversion_failure);
+ } else {
+ LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
+ }
+}
+
+
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -8549,7 +9512,11 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow, not_taken);
// Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
+ IntegerConvert(masm,
+ eax,
+ NumberInfo::Unknown(),
+ CpuFeatures::IsSupported(SSE3),
+ &slow);
// Do the bitwise operation and check if the result fits in a smi.
Label try_float;
@@ -8685,7 +9652,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
@@ -8786,7 +9753,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -8795,10 +9762,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifndef V8_NATIVE_REGEXP
- __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_NATIVE_REGEXP
if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
}
@@ -8981,48 +9948,50 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+
// Argument 7: Indicate that this is a direct call from JavaScript.
- __ push(Immediate(1));
+ __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
// Argument 6: Start (high end) of backtracking stack memory area.
__ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ push(ecx);
+ __ mov(Operand(esp, 5 * kPointerSize), ecx);
// Argument 5: static offsets vector buffer.
- __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
+ __ mov(Operand(esp, 4 * kPointerSize),
+ Immediate(ExternalReference::address_of_static_offsets_vector()));
// Argument 4: End of string data
// Argument 3: Start of string data
- Label push_two_byte, push_rest;
+ Label setup_two_byte, setup_rest;
__ test(edi, Operand(edi));
__ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &push_two_byte);
+ __ j(zero, &setup_two_byte);
__ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
- __ push(ecx); // Argument 4.
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
- __ push(ecx); // Argument 3.
- __ jmp(&push_rest);
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+ __ jmp(&setup_rest);
- __ bind(&push_two_byte);
+ __ bind(&setup_two_byte);
__ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
- __ push(ecx); // Argument 4.
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ push(ecx); // Argument 3.
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ bind(&push_rest);
+ __ bind(&setup_rest);
// Argument 2: Previous index.
- __ push(ebx);
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
// Argument 1: Subject string.
- __ push(eax);
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
// Locate the code entry and call it.
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(Operand(edx));
- // Remove arguments.
- __ add(Operand(esp), Immediate(7 * kPointerSize));
+ __ CallCFunction(edx, kRegExpExecuteArguments);
// Check the result.
Label success;
@@ -9120,7 +10089,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif // V8_NATIVE_REGEXP
}
@@ -9189,7 +10158,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
+ __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}
@@ -9474,7 +10443,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
__ push(eax);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
}
@@ -9635,9 +10604,7 @@ void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
__ LeaveExitFrame(ExitFrame::MODE_NORMAL);
__ ret(0);
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
- 0,
- 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
}
@@ -10122,6 +11089,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label make_two_character_string, make_flat_ascii_string;
GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
&make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
@@ -10266,7 +11234,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
}
@@ -10330,6 +11298,7 @@ void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy from edi to esi using rep movs instruction.
__ mov(scratch, count);
__ sar(count, 2); // Number of doublewords to copy.
+ __ cld();
__ rep_movs();
// Find number of bytes left.
@@ -10401,10 +11370,7 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Calculate capacity mask from the symbol table capacity.
Register mask = scratch2;
- static const int kCapacityOffset =
- FixedArray::kHeaderSize +
- SymbolTable::kCapacityIndex * kPointerSize;
- __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
+ __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ SmiUntag(mask);
__ sub(Operand(mask), Immediate(1));
@@ -10429,16 +11395,12 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Load the entry from the symble table.
Register candidate = scratch; // Scratch register contains candidate.
- ASSERT_EQ(1, SymbolTableShape::kEntrySize);
- static const int kFirstElementOffset =
- FixedArray::kHeaderSize +
- SymbolTable::kPrefixStartIndex * kPointerSize +
- SymbolTableShape::kPrefixSize * kPointerSize;
+ ASSERT_EQ(1, SymbolTable::kEntrySize);
__ mov(candidate,
FieldOperand(symbol_table,
scratch,
times_pointer_size,
- kFirstElementOffset));
+ SymbolTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
__ cmp(candidate, Factory::undefined_value());
@@ -10592,7 +11554,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
Label make_two_character_string;
GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
&make_two_character_string);
- __ ret(2 * kPointerSize);
+ __ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
// Setup registers for allocating the two character string.
@@ -10676,7 +11638,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
@@ -10792,7 +11754,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
#undef __
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 0fa6919774..79cad72685 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -28,6 +28,8 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
+#include "ic-inl.h"
+
namespace v8 {
namespace internal {
@@ -494,8 +496,8 @@ class CodeGenerator: public AstVisitor {
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
- // than 16 bits.
- static const int kMaxSmiInlinedBits = 16;
+ // than 17 bits.
+ static const int kMaxSmiInlinedBits = 17;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target or into the stack using
// at most 16 bits of user-controlled data per assembly operation.
@@ -563,6 +565,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateCharFromCode(ZoneList<Expression*>* args);
+
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -588,6 +593,16 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast support for Math.pow().
+ void GenerateMathPow(ZoneList<Expression*>* args);
+
+ // Fast call to transcendental functions.
+ void GenerateMathSin(ZoneList<Expression*>* args);
+ void GenerateMathCos(ZoneList<Expression*>* args);
+
+ // Fast case for sqrt
+ void GenerateMathSqrt(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -655,6 +670,22 @@ class CodeGenerator: public AstVisitor {
};
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm);
+};
+
+
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@@ -667,18 +698,35 @@ class GenericBinaryOpStub: public CodeStub {
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags,
- NumberInfo::Type operands_type = NumberInfo::kUnknown)
+ NumberInfo operands_type)
: op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
- name_(NULL),
- operands_type_(operands_type) {
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ if (static_operands_type_.IsSmi()) {
+ mode_ = NO_OVERWRITE;
+ }
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ use_sse3_(SSE3Bits::decode(key)),
+ static_operands_type_(NumberInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(runtime_operands_type),
+ name_(NULL) {
+ }
+
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
@@ -698,8 +746,14 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
+
+ // Number type information of operands, determined by code generator.
+ NumberInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
char* name_;
- NumberInfo::Type operands_type_; // Number type information of operands.
const char* GetName();
@@ -713,29 +767,32 @@ class GenericBinaryOpStub: public CodeStub {
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_),
- NumberInfo::ToString(operands_type_));
+ static_operands_type_.ToString());
}
#endif
- // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
+ // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+ class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
+ // Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
- | NumberInfoBits::encode(operands_type_);
+ | StaticTypeInfoBits::encode(
+ static_operands_type_.ThreeBitRepresentation())
+ | RuntimeTypeInfoBits::encode(runtime_operands_type_);
}
void Generate(MacroAssembler* masm);
@@ -743,6 +800,8 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
return op_ == Token::ADD || op_ == Token::SUB
@@ -757,6 +816,22 @@ class GenericBinaryOpStub: public CodeStub {
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgsInRegisters() { return args_in_registers_; }
bool HasArgsReversed() { return args_reversed_; }
+
+ bool ShouldGenerateSmiCode() {
+ return HasSmiCodeInStub() &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
};
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index a9e26263f5..5d18a0354e 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -146,9 +146,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- edx : receiver
+ // -- eax : key
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
}
@@ -156,10 +157,12 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
// -----------------------------------
// Register eax contains an object that needs to be pushed on the
// expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, eax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
}
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index a3b701645c..0d85b10e7e 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -89,6 +89,7 @@ static ByteMnemonic zero_operands_instr[] = {
{0x9E, "sahf", UNSET_OP_ORDER},
{0x99, "cdq", UNSET_OP_ORDER},
{0x9B, "fwait", UNSET_OP_ORDER},
+ {0xFC, "cld", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
@@ -679,6 +680,7 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
case 0xDD: switch (regop) {
case 0: mnem = "fld_d"; break;
+ case 2: mnem = "fstp"; break;
case 3: mnem = "fstp_d"; break;
default: UnimplementedInstruction();
}
@@ -720,6 +722,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xE1: mnem = "fabs"; break;
case 0xE4: mnem = "ftst"; break;
case 0xE8: mnem = "fld1"; break;
+ case 0xEB: mnem = "fldpi"; break;
case 0xEE: mnem = "fldz"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
@@ -1053,7 +1056,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer(",%s", NameOfCPURegister(regop));
} else if (*data == 0x0F) {
data++;
- if (*data == 0x2F) {
+ if (*data == 0x38) {
+ data++;
+ if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("ptest %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0x2F) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
@@ -1069,6 +1085,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x6E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
} else if (*data == 0x6F) {
data++;
int mod, regop, rm;
@@ -1082,6 +1104,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xEF) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pxor %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
UnimplementedInstruction();
}
@@ -1168,6 +1198,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
const char* mnem = "?";
switch (b2) {
case 0x2A: mnem = "cvtsi2sd"; break;
+ case 0x51: mnem = "sqrtsd"; break;
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
case 0x5C: mnem = "subsd"; break;
@@ -1197,6 +1228,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (*(data+2) == 0x2C) {
data += 3;
data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ } else if (*(data+2) == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtss2sd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*(data+2) == 0x6F) {
data += 3;
int mod, regop, rm;
@@ -1216,6 +1255,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*(data+1) == 0xA5) {
data += 2;
AppendToBuffer("rep_movs");
+ } else if (*(data+1) == 0xAB) {
+ data += 2;
+ AppendToBuffer("rep_stos");
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index f1c2507712..4dcf2329a1 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -621,6 +621,7 @@ void FastCodeGenerator::EmitBitOr() {
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
+ Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index e42fcc8624..6e3ae105d1 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -56,6 +56,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) {
__ push(ebp); // Caller's frame pointer.
@@ -741,23 +742,22 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
-
if (decl->fun() != NULL) {
+ VisitForValue(prop->key(), kStack);
VisitForValue(decl->fun(), kAccumulator);
+ __ pop(ecx);
} else {
+ VisitForValue(prop->key(), kAccumulator);
+ __ mov(ecx, result_register());
__ mov(result_register(), Factory::the_hole_value());
}
+ __ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Absence of a test eax instruction following the call
// indicates that none of the load was inlined.
__ nop();
-
- // Value in eax is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
}
}
}
@@ -1130,7 +1130,8 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
__ push(result_register());
GenericBinaryOpStub stub(op,
NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
+ NO_GENERIC_BINARY_FLAGS,
+ NumberInfo::Unknown());
__ CallStub(&stub);
Apply(context, eax);
}
@@ -1251,6 +1252,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(result_register());
}
+ __ pop(ecx);
+ if (expr->ends_initialization_block()) {
+ __ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
+ } else {
+ __ pop(edx);
+ }
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
@@ -1261,15 +1268,14 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
+ __ pop(edx);
__ push(eax); // Result of assignment, saved even if not needed.
- // Receiver is under the key and value.
- __ push(Operand(esp, 2 * kPointerSize));
+ __ push(edx);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
}
- // Receiver and key are still on stack.
- DropAndApply(2, context_, eax);
+ Apply(context_, eax);
}
@@ -1739,7 +1745,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
+ NO_GENERIC_BINARY_FLAGS,
+ NumberInfo::Unknown());
stub.GenerateCall(masm(), eax, Smi::FromInt(1));
__ bind(&done);
@@ -1777,18 +1784,20 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
+ __ pop(ecx);
+ __ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
// site for it to patch.
__ nop();
if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
+ // Result is on the stack
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
- DropAndApply(2, context_, eax);
+ Apply(context_, eax);
}
break;
}
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index fcc82710b6..555cd1bf9b 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -610,8 +610,9 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(ecx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(
- IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+ ExternalReference ref = ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor));
+ __ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
GenerateMiss(masm);
@@ -621,54 +622,41 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
Label slow, fast, array, extra, check_pixel_array;
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, key
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Get the map from the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow, not_taken);
- // Get the key from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
- // Get the instance type from the map of the receiver.
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // Check if the object is a JS array or not.
- __ cmp(ecx, JS_ARRAY_TYPE);
+ __ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JS object.
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &slow, not_taken);
+ __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow, not_taken);
// Object case: Check key against length in the elements array.
// eax: value
// edx: JSObject
- // ebx: index (as a smi)
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ // ecx: key (a smi)
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &check_pixel_array, not_taken);
- // Untag the key (for checking against untagged length in the fixed array).
- __ mov(edx, Operand(ebx));
- __ sar(edx, kSmiTagSize); // untag the index and use it for the comparison
- __ cmp(edx, FieldOperand(ecx, Array::kLengthOffset));
- // eax: value
- // ecx: FixedArray
- // ebx: index (as a smi)
+ __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
+ __ mov(ebx, Operand(ecx));
+ __ SmiUntag(ebx);
+ __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ j(below, &fast, taken);
// Slow case: call runtime.
@@ -676,52 +664,51 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
- // eax: value
- // ecx: elements array
- // ebx: index (as a smi)
__ bind(&check_pixel_array);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::pixel_array_map()));
- __ j(not_equal, &slow);
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements array
+ __ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
- __ sar(ebx, kSmiTagSize); // Untag the index.
- __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
- __ mov(edx, eax); // Save the value.
- __ sar(eax, kSmiTagSize); // Untag the value.
+ __ mov(ecx, eax); // Save the value. Key is not longer needed.
+ __ SmiUntag(ecx);
{ // Clamp the value to [0..255].
Label done;
- __ test(eax, Immediate(0xFFFFFF00));
+ __ test(ecx, Immediate(0xFFFFFF00));
__ j(zero, &done);
- __ setcc(negative, eax); // 1 if negative, 0 if positive.
- __ dec_b(eax); // 0 if negative, 255 if positive.
+ __ setcc(negative, ecx); // 1 if negative, 0 if positive.
+ __ dec_b(ecx); // 0 if negative, 255 if positive.
__ bind(&done);
}
- __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
- __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
- __ mov(eax, edx); // Return the original value.
- __ ret(0);
+ __ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset));
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ __ ret(0); // Return value in eax.
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
__ bind(&extra);
// eax: value
- // edx: JSArray
- // ecx: FixedArray
- // ebx: index (as a smi)
- // flags: compare (ebx, edx.length())
+ // edx: receiver, a JSArray
+ // ecx: key, a smi.
+ // edi: receiver->elements, a FixedArray
+ // flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
- __ sar(ebx, kSmiTagSize); // untag
- __ cmp(ebx, FieldOperand(ecx, Array::kLengthOffset));
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx); // untag
+ __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ j(above_equal, &slow, not_taken);
- // Restore tag and increment.
- __ lea(ebx, Operand(ebx, times_2, 1 << kSmiTagSize));
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), ebx);
- __ sub(Operand(ebx), Immediate(1 << kSmiTagSize)); // decrement ebx again
+ // Add 1 to receiver->length, and go to fast array write.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(1 << kSmiTagSize));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
@@ -729,28 +716,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// length is always a smi.
__ bind(&array);
// eax: value
- // edx: JSArray
- // ebx: index (as a smi)
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &check_pixel_array);
+ // edx: receiver, a JSArray
+ // ecx: key, a smi.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
- __ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
+ __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra, not_taken);
// Fast case: Do the store.
__ bind(&fast);
// eax: value
- // ecx: FixedArray
- // ebx: index (as a smi)
- __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
- eax);
+ // ecx: key (a smi)
+ // edx: receiver
+ // edi: FixedArray receiver->elements
+ __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
// Update write barrier for the elements array address.
__ mov(edx, Operand(eax));
- __ RecordWrite(ecx, 0, edx, ebx);
+ __ RecordWrite(edi, 0, edx, ecx);
__ ret(0);
}
@@ -759,92 +744,91 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
Label slow, check_heap_number;
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
- // Get the key from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // Check that the object is a JS object.
- __ cmp(ecx, JS_OBJECT_TYPE);
+ __ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
- // edx: JSObject
- // ebx: index (as a smi)
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- Handle<Map> map(Heap::MapForExternalArrayType(array_type));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(map));
- __ j(not_equal, &slow);
+ // edx: receiver, a JSObject
+ // ecx: key, a smi
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
+ &slow, true);
// Check that the index is in range.
- __ sar(ebx, kSmiTagSize); // Untag the index.
- __ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
- // ecx: elements array
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// smi case
- __ mov(edx, eax); // Save the value.
- __ sar(eax, kSmiTagSize); // Untag the value.
- __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
+ __ SmiUntag(ecx);
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ mov(Operand(ecx, ebx, times_4, 0), eax);
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
- __ push(eax);
+ __ push(ecx);
__ fild_s(Operand(esp, 0));
- __ pop(eax);
- __ fstp_s(Operand(ecx, ebx, times_4, 0));
+ __ pop(ecx);
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
- __ mov(eax, edx); // Return the original value.
- __ ret(0);
+ __ ret(0); // Return the original value.
__ bind(&check_heap_number);
+ // eax: value
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
+ // ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
@@ -853,14 +837,12 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, eax); // Save the value.
- __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
- // ecx: base pointer of external storage
+ // edi: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
- __ fstp_s(Operand(ecx, ebx, times_4, 0));
- __ mov(eax, edx); // Return the original value.
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
} else {
// Need to perform float-to-int conversion.
@@ -870,29 +852,27 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ j(parity_even, &is_nan);
if (array_type != kExternalUnsignedIntArray) {
- __ push(eax); // Make room on stack
+ __ push(ecx); // Make room on stack
__ fistp_s(Operand(esp, 0));
- __ pop(eax);
+ __ pop(ecx);
} else {
// fistp stores values as signed integers.
// To represent the entire range, we need to store as a 64-bit
// int and discard the high 32 bits.
- __ push(eax); // Make room on stack
- __ push(eax); // Make room on stack
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fistp_d(Operand(esp, 0));
- __ pop(eax);
- __ mov(Operand(esp, 0), eax);
- __ pop(eax);
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(kPointerSize));
}
- // eax: untagged integer value
+ // ecx: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
@@ -903,21 +883,20 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// This test would apparently detect both NaN and Infinity,
// but we've already checked for NaN using the FPU hardware
// above.
- __ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
- __ and_(edi, 0x7FF0);
- __ cmp(edi, 0x7FF0);
+ __ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
+ __ and_(edx, 0x7FF0);
+ __ cmp(edx, 0x7FF0);
__ j(not_equal, &not_infinity);
- __ mov(eax, 0);
+ __ mov(ecx, 0);
__ bind(&not_infinity);
- __ mov(Operand(ecx, ebx, times_4, 0), eax);
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
}
default:
UNREACHABLE();
break;
}
- __ mov(eax, edx); // Return the original value.
- __ ret(0);
+ __ ret(0); // Return original value.
__ bind(&is_nan);
__ ffree();
@@ -925,23 +904,22 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
- __ mov_b(Operand(ecx, ebx, times_1, 0), 0);
+ __ mov_b(Operand(edi, ebx, times_1, 0), 0);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ mov(eax, 0);
- __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ __ xor_(ecx, Operand(ecx));
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
+ __ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
break;
default:
UNREACHABLE();
break;
}
- __ mov(eax, edx); // Return the original value.
- __ ret(0);
+ __ ret(0); // Return the original value.
}
// Slow case: call runtime.
@@ -1262,7 +1240,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -1377,7 +1356,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -1394,7 +1374,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -1431,7 +1411,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx);
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -1478,7 +1459,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(value);
__ push(scratch); // return address
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ __ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
@@ -1492,38 +1474,39 @@ Object* KeyedStoreIC_Miss(Arguments args);
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
- __ pop(ecx);
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(eax);
+ __ pop(ebx);
+ __ push(edx);
__ push(ecx);
+ __ push(eax);
+ __ push(ebx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
- __ pop(ecx);
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(eax);
+ __ pop(ebx);
+ __ push(edx);
__ push(ecx);
+ __ push(eax);
+ __ push(ebx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ __ TailCallExternalReference(ref, 3, 1);
}
#undef __
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 5ae3fe205a..45e24fa737 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -164,7 +164,10 @@ void MacroAssembler::RecordWrite(Register object, int offset,
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(value, Operand(object));
- and_(value, Heap::NewSpaceMask());
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ and_(Operand(value), Immediate(ExternalReference::new_space_mask()));
cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
j(equal, &done);
} else {
@@ -1186,15 +1189,22 @@ Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
}
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
- JumpToRuntime(ext);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
@@ -1264,7 +1274,7 @@ Object* MacroAssembler::TryPopHandleScope(Register saved, Register scratch) {
}
-void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
CEntryStub ces(1);
@@ -1615,6 +1625,41 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
}
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ int frameAlignment = OS::ActivationFrameAlignment();
+ if (frameAlignment != 0) {
+ // Make stack end at alignment and make room for num_arguments words
+ // and the original value of esp.
+ mov(scratch, esp);
+ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frameAlignment));
+ and_(esp, -frameAlignment);
+ mov(Operand(esp, num_arguments * kPointerSize), scratch);
+ } else {
+ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ // Trashing eax is ok as it will be the return value.
+ mov(Operand(eax), Immediate(function));
+ CallCFunction(eax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_arguments) {
+ call(Operand(function));
+ if (OS::ActivationFrameAlignment() != 0) {
+ mov(esp, Operand(esp, num_arguments * kPointerSize));
+ } else {
+ add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+ }
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 69dc54ca8a..a284b63676 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -349,7 +349,6 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- // Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Call a runtime function, returning the CodeStub object called.
@@ -367,12 +366,34 @@ class MacroAssembler: public Assembler {
Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToRuntime, but also takes care of passing the number
- // of arguments.
- void TailCallRuntime(const ExternalReference& ext,
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in esp[0], esp[4],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
void PushHandleScope(Register scratch);
// Pops a handle scope using the specified scratch register and
@@ -384,7 +405,7 @@ class MacroAssembler: public Assembler {
Object* TryPopHandleScope(Register saved, Register scratch);
// Jump to a runtime routine.
- void JumpToRuntime(const ExternalReference& ext);
+ void JumpToExternalReference(const ExternalReference& ext);
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index f6da693797..74b9d129cc 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -324,8 +324,8 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ push(backtrack_stackpointer());
__ push(ebx);
- const int argument_count = 3;
- FrameAlign(argument_count, ecx);
+ static const int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, ecx);
// Put arguments into allocated stack area, last argument highest on stack.
// Parameters are
// Address byte_offset1 - Address captured substring's start.
@@ -346,7 +346,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
ExternalReference compare =
ExternalReference::re_case_insensitive_compare_uc16();
- CallCFunction(compare, argument_count);
+ __ CallCFunction(compare, argument_count);
// Pop original values before reacting on result value.
__ pop(ebx);
__ pop(backtrack_stackpointer());
@@ -784,13 +784,13 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
// Call GrowStack(backtrack_stackpointer())
- int num_arguments = 2;
- FrameAlign(num_arguments, ebx);
+ static const int num_arguments = 2;
+ __ PrepareCallCFunction(num_arguments, ebx);
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, Operand(eax));
@@ -951,8 +951,8 @@ void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
- int num_arguments = 3;
- FrameAlign(num_arguments, scratch);
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(Operand(esp, 2 * kPointerSize), ebp);
// Code* of self.
@@ -962,7 +962,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ mov(Operand(esp, 0 * kPointerSize), eax);
ExternalReference check_stack_guard =
ExternalReference::re_check_stack_guard_state();
- CallCFunction(check_stack_guard, num_arguments);
+ __ CallCFunction(check_stack_guard, num_arguments);
}
@@ -1153,37 +1153,6 @@ void RegExpMacroAssemblerIA32::CheckStackLimit() {
}
-void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
- // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
- // use it, e.g., for SafeCall), we know the number of elements on the stack
- // since the last frame alignment. We might be able to do this simpler then.
- int frameAlignment = OS::ActivationFrameAlignment();
- if (frameAlignment != 0) {
- // Make stack end at alignment and make room for num_arguments words
- // and the original value of esp.
- __ mov(scratch, esp);
- __ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frameAlignment));
- __ and_(esp, -frameAlignment);
- __ mov(Operand(esp, num_arguments * kPointerSize), scratch);
- } else {
- __ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
- int num_arguments) {
- __ mov(Operand(eax), Immediate(function));
- __ call(Operand(eax));
- if (OS::ActivationFrameAlignment() != 0) {
- __ mov(esp, Operand(esp, num_arguments * kPointerSize));
- } else {
- __ add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
- }
-}
-
-
void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
if (mode_ == ASCII) {
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index d9866b72b9..0d5e272dcf 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -187,21 +187,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// (ecx) and increments it by a word size.
inline void Pop(Register target);
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- inline void FrameAlign(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by FrameAlign. The called function is not allowed to trigger a garbage
- // collection, since that might move the code and invalidate the return
- // address (unless this is somehow accounted for).
- inline void CallCFunction(ExternalReference function, int num_arguments);
-
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc
index 20539bff95..3ccfe8cd02 100644
--- a/deps/v8/src/ia32/register-allocator-ia32.cc
+++ b/deps/v8/src/ia32/register-allocator-ia32.cc
@@ -49,6 +49,7 @@ void Result::ToRegister() {
Immediate(handle()));
}
// This result becomes a copy of the fresh one.
+ fresh.set_number_info(number_info());
*this = fresh;
}
ASSERT(is_register());
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index ade324b92f..a7e9a69a04 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -446,7 +446,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(ref, 5, 1);
+ __ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@@ -468,7 +468,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(ref, 5, 1);
+ __ TailCallExternalReference(ref, 5, 1);
}
private:
@@ -907,7 +907,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(Immediate(Handle<Map>(transition)));
__ push(eax);
__ push(scratch);
- __ TailCallRuntime(
+ __ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
return;
}
@@ -1589,7 +1589,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
- __ TailCallRuntime(store_callback_property, 4, 1);
+ __ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1638,7 +1638,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallRuntime(store_ic_property, 3, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1691,23 +1691,18 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_store_field, 1);
- // Get the name from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
// Check that the name has not changed.
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- // Get the object from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
object,
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 75ff670b6d..7b03a5b2f7 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -37,23 +37,6 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
- for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
- }
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- register_locations_[i] = kIllegalIndex;
- }
-}
-
-
void VirtualFrame::SyncElementBelowStackPointer(int index) {
// Emit code to write elements below the stack pointer to their
// (already allocated) stack address.
@@ -179,7 +162,7 @@ void VirtualFrame::MakeMergable() {
if (element.is_constant() || element.is_copy()) {
if (element.is_synced()) {
// Just spill.
- elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
+ elements_[i] = FrameElement::MemoryElement(NumberInfo::Unknown());
} else {
// Allocate to a register.
FrameElement backing_element; // Invalid if not a copy.
@@ -191,7 +174,7 @@ void VirtualFrame::MakeMergable() {
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED,
- NumberInfo::kUnknown);
+ NumberInfo::Unknown());
Use(fresh.reg(), i);
// Emit a move.
@@ -224,7 +207,7 @@ void VirtualFrame::MakeMergable() {
// The copy flag is not relied on before the end of this loop,
// including when registers are spilled.
elements_[i].clear_copied();
- elements_[i].set_number_info(NumberInfo::kUnknown);
+ elements_[i].set_number_info(NumberInfo::Unknown());
}
}
}
@@ -896,30 +879,39 @@ Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
}
+// This function assumes that the only results that could be in a_reg or b_reg
+// are a and b. Other results can be live, but must not be in a_reg or b_reg.
+void VirtualFrame::MoveResultsToRegisters(Result* a,
+ Result* b,
+ Register a_reg,
+ Register b_reg) {
+ if (a->is_register() && a->reg().is(a_reg)) {
+ b->ToRegister(b_reg);
+ } else if (!cgen()->allocator()->is_used(a_reg)) {
+ a->ToRegister(a_reg);
+ b->ToRegister(b_reg);
+ } else if (cgen()->allocator()->is_used(b_reg)) {
+ // a must be in b_reg, b in a_reg.
+ __ xchg(a_reg, b_reg);
+ // Results a and b will be invalidated, so it is ok if they are switched.
+ } else {
+ b->ToRegister(b_reg);
+ a->ToRegister(a_reg);
+ }
+ a->Unuse();
+ b->Unuse();
+}
+
+
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. The IC expects
// name in ecx and receiver in eax.
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
Result receiver = Pop();
PrepareForCall(0, 0); // No stack arguments.
- // Move results to the right registers:
- if (name.is_register() && name.reg().is(eax)) {
- if (receiver.is_register() && receiver.reg().is(ecx)) {
- // Wrong registers.
- __ xchg(eax, ecx);
- } else {
- // Register ecx is free for name, which frees eax for receiver.
- name.ToRegister(ecx);
- receiver.ToRegister(eax);
- }
- } else {
- // Register eax is free for receiver, which frees ecx for name.
- receiver.ToRegister(eax);
- name.ToRegister(ecx);
- }
- name.Unuse();
- receiver.Unuse();
+ MoveResultsToRegisters(&name, &receiver, ecx, eax);
+
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
return RawCallCodeObject(ic, mode);
}
@@ -929,20 +921,7 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
Result key = Pop();
Result receiver = Pop();
PrepareForCall(0, 0);
-
- if (!key.is_register() || !key.reg().is(edx)) {
- // Register edx is available for receiver.
- receiver.ToRegister(edx);
- key.ToRegister(eax);
- } else if (!receiver.is_register() || !receiver.reg().is(eax)) {
- // Register eax is available for key.
- key.ToRegister(eax);
- receiver.ToRegister(edx);
- } else {
- __ xchg(edx, eax);
- }
- key.Unuse();
- receiver.Unuse();
+ MoveResultsToRegisters(&key, &receiver, eax, edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
return RawCallCodeObject(ic, mode);
@@ -958,42 +937,57 @@ Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(eax);
__ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, name);
+ value.Unuse();
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
-
- if (value.is_register() && value.reg().is(edx)) {
- if (receiver.is_register() && receiver.reg().is(eax)) {
- // Wrong registers.
- __ xchg(eax, edx);
- } else {
- // Register eax is free for value, which frees edx for receiver.
- value.ToRegister(eax);
- receiver.ToRegister(edx);
- }
- } else {
- // Register edx is free for receiver, which guarantees eax is free for
- // value.
- receiver.ToRegister(edx);
- value.ToRegister(eax);
- }
+ MoveResultsToRegisters(&value, &receiver, eax, edx);
}
__ mov(ecx, name);
- value.Unuse();
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
Result VirtualFrame::CallKeyedStoreIC() {
// Value, key, and receiver are on the top of the frame. The IC
- // expects value in eax and key and receiver on the stack. It does
- // not drop the key and receiver.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ // expects value in eax, key in ecx, and receiver in edx.
Result value = Pop();
- PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
- value.ToRegister(eax);
- value.Unuse();
+ Result key = Pop();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ if (!cgen()->allocator()->is_used(eax) ||
+ (value.is_register() && value.reg().is(eax))) {
+ value.ToRegister(eax); // No effect if value is in eax already.
+ MoveResultsToRegisters(&key, &receiver, ecx, edx);
+ value.Unuse();
+ } else if (!cgen()->allocator()->is_used(ecx) ||
+ (key.is_register() && key.reg().is(ecx))) {
+ // Receiver and/or key are in eax.
+ key.ToRegister(ecx);
+ MoveResultsToRegisters(&value, &receiver, eax, edx);
+ key.Unuse();
+ } else if (!cgen()->allocator()->is_used(edx) ||
+ (receiver.is_register() && receiver.reg().is(edx))) {
+ receiver.ToRegister(edx);
+ MoveResultsToRegisters(&key, &value, ecx, eax);
+ receiver.Unuse();
+ } else {
+ // All three registers are used, and no value is in the correct place.
+ // We have one of the two circular permutations of eax, ecx, edx.
+ ASSERT(value.is_register());
+ if (value.reg().is(ecx)) {
+ __ xchg(eax, edx);
+ __ xchg(eax, ecx);
+ } else {
+ __ xchg(eax, ecx);
+ __ xchg(eax, edx);
+ }
+ value.Unuse();
+ key.Unuse();
+ receiver.Unuse();
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
@@ -1068,7 +1062,7 @@ Result VirtualFrame::Pop() {
ASSERT(element.is_valid());
// Get number type information of the result.
- NumberInfo::Type info;
+ NumberInfo info;
if (!element.is_copy()) {
info = element.number_info();
} else {
@@ -1143,7 +1137,7 @@ void VirtualFrame::EmitPop(Operand operand) {
}
-void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -1151,7 +1145,7 @@ void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
}
-void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Operand operand, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -1159,7 +1153,7 @@ void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
}
-void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index d4cc1417c9..cd2d18f432 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -73,7 +73,7 @@ class VirtualFrame: public ZoneObject {
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- VirtualFrame();
+ inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
@@ -84,7 +84,7 @@ class VirtualFrame: public ZoneObject {
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index,
- NumberInfo::Type info = NumberInfo::kUninitialized);
+ NumberInfo info = NumberInfo::Uninitialized());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -388,14 +388,14 @@ class VirtualFrame: public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
void EmitPush(Operand operand,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
void EmitPush(Immediate immediate,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
// Push an element on the virtual frame.
- inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
+ inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
@@ -571,6 +571,14 @@ class VirtualFrame: public ZoneObject {
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
+ // This function assumes that a and b are the only results that could be in
+ // the registers a_reg or b_reg. Other results can be live, but must not
+ // be in the registers a_reg or b_reg. The results a and b are invalidated.
+ void MoveResultsToRegisters(Result* a,
+ Result* b,
+ Register a_reg,
+ Register b_reg);
+
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
Result RawCallStub(CodeStub* stub);
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 107c3c0ae6..f82e61e47b 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -63,7 +63,9 @@ void IC::TraceIC(const char* type,
Code* new_target,
const char* extra_info) {
if (FLAG_trace_ic) {
- State new_state = StateFrom(new_target, Heap::undefined_value());
+ State new_state = StateFrom(new_target,
+ Heap::undefined_value(),
+ Heap::undefined_value());
PrintF("[%s (%c->%c)%s", type,
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state),
@@ -132,7 +134,7 @@ Address IC::OriginalCodeAddress() {
}
#endif
-IC::State IC::StateFrom(Code* target, Object* receiver) {
+IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
@@ -148,7 +150,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver) {
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
- int index = map->IndexInCodeCache(target);
+ int index = map->IndexInCodeCache(String::cast(name), target);
if (index >= 0) {
// For keyed load/store, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
@@ -160,7 +162,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver) {
// Remove the target from the code cache to avoid hitting the same
// invalid stub again.
- map->RemoveFromCodeCache(index);
+ map->RemoveFromCodeCache(String::cast(name), target, index);
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
@@ -222,6 +224,7 @@ void IC::Clear(Address address) {
case Code::STORE_IC: return StoreIC::Clear(address, target);
case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
+ case Code::BINARY_OP_IC: return BinaryOpIC::Clear(address, target);
default: UNREACHABLE();
}
}
@@ -1049,6 +1052,20 @@ Object* StoreIC::Store(State state,
return *value;
}
+
+ // Use specialized code for setting the length of arrays.
+ if (receiver->IsJSArray()
+ && name->Equals(Heap::length_symbol())
+ && receiver->AllowsSetElementsLength()) {
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
+#endif
+ Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
+ set_target(target);
+ StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+ return receiver->SetProperty(*name, *value, NONE);
+ }
+
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
@@ -1285,7 +1302,7 @@ Object* CallIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
CallIC ic;
- IC::State state = IC::StateFrom(ic.target(), args[0]);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Object* result =
ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
@@ -1318,7 +1335,7 @@ Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
LoadIC ic;
- IC::State state = IC::StateFrom(ic.target(), args[0]);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<String>(1));
}
@@ -1328,7 +1345,7 @@ Object* KeyedLoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
KeyedLoadIC ic;
- IC::State state = IC::StateFrom(ic.target(), args[0]);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
}
@@ -1338,7 +1355,7 @@ Object* StoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
StoreIC ic;
- IC::State state = IC::StateFrom(ic.target(), args[0]);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Store(state, args.at<Object>(0), args.at<String>(1),
args.at<Object>(2));
}
@@ -1351,7 +1368,9 @@ Object* StoreIC_ArrayLength(Arguments args) {
JSObject* receiver = JSObject::cast(args[0]);
Object* len = args[1];
- return receiver->SetElementsLength(len);
+ Object* result = receiver->SetElementsLength(len);
+ if (result->IsFailure()) return result;
+ return len;
}
@@ -1394,12 +1413,118 @@ Object* KeyedStoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
KeyedStoreIC ic;
- IC::State state = IC::StateFrom(ic.target(), args[0]);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Store(state, args.at<Object>(0), args.at<Object>(1),
args.at<Object>(2));
}
+void BinaryOpIC::patch(Code* code) {
+ set_target(code);
+}
+
+
+void BinaryOpIC::Clear(Address address, Code* target) {
+ if (target->ic_state() == UNINITIALIZED) return;
+
+ // At the end of a fast case stub there should be a reference to
+ // a corresponding UNINITIALIZED stub, so look for the last reloc info item.
+ RelocInfo* rinfo = NULL;
+ for (RelocIterator it(target, RelocInfo::kCodeTargetMask);
+ !it.done(); it.next()) {
+ rinfo = it.rinfo();
+ }
+
+ ASSERT(rinfo != NULL);
+ Code* uninit_stub = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ ASSERT(uninit_stub->ic_state() == UNINITIALIZED &&
+ uninit_stub->kind() == Code::BINARY_OP_IC);
+ SetTargetAtAddress(address, uninit_stub);
+}
+
+
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
+ switch (type_info) {
+ case DEFAULT: return "Default";
+ case GENERIC: return "Generic";
+ case HEAP_NUMBERS: return "HeapNumbers";
+ case STRINGS: return "Strings";
+ default: return "Invalid";
+ }
+}
+
+
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
+ switch (type_info) {
+ // DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs
+ // are not cleared at GC.
+ case DEFAULT: return UNINITIALIZED;
+
+ // Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is
+ // conceptually closer.
+ case GENERIC: return MEGAMORPHIC;
+
+ default: return MONOMORPHIC;
+ }
+}
+
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
+ Object* right) {
+ // Patching is never requested for the two smis.
+ ASSERT(!left->IsSmi() || !right->IsSmi());
+
+ if (left->IsNumber() && right->IsNumber()) {
+ return HEAP_NUMBERS;
+ }
+
+ if (left->IsString() || right->IsString()) {
+ // Patching for fast string ADD makes sense even if only one of the
+ // arguments is a string.
+ return STRINGS;
+ }
+
+ return GENERIC;
+}
+
+
+// defined in codegen-<arch>.cc
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
+
+
+Object* BinaryOp_Patch(Arguments args) {
+ ASSERT(args.length() == 6);
+
+ Handle<Object> left = args.at<Object>(0);
+ Handle<Object> right = args.at<Object>(1);
+ Handle<Object> result = args.at<Object>(2);
+ int key = Smi::cast(args[3])->value();
+#ifdef DEBUG
+ Token::Value op = static_cast<Token::Value>(Smi::cast(args[4])->value());
+ BinaryOpIC::TypeInfo prev_type_info =
+ static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[5])->value());
+#endif // DEBUG
+ { HandleScope scope;
+ BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
+ Handle<Code> code = GetBinaryOpStub(key, type_info);
+ if (!code.is_null()) {
+ BinaryOpIC ic;
+ ic.patch(*code);
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[BinaryOpIC (%s->%s)#%s]\n",
+ BinaryOpIC::GetName(prev_type_info),
+ BinaryOpIC::GetName(type_info),
+ Token::Name(op));
+ }
+#endif // DEBUG
+ }
+ }
+
+ return *result;
+}
+
+
static Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR)
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index d545989bf6..7464a57c6b 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -55,7 +55,8 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
ICU(LoadPropertyWithInterceptorForLoad) \
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
- ICU(StoreInterceptorProperty)
+ ICU(StoreInterceptorProperty) \
+ ICU(BinaryOp_Patch)
//
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@@ -93,8 +94,8 @@ class IC {
Code* target() { return GetTargetAtAddress(address()); }
inline Address address();
- // Compute the current IC state based on the target stub and the receiver.
- static State StateFrom(Code* target, Object* receiver);
+ // Compute the current IC state based on the target stub, receiver and name.
+ static State StateFrom(Code* target, Object* receiver, Object* name);
// Clear the inline cache to initial state.
static void Clear(Address address);
@@ -444,6 +445,30 @@ class KeyedStoreIC: public IC {
};
+class BinaryOpIC: public IC {
+ public:
+
+ enum TypeInfo {
+ DEFAULT, // Initial state. When first executed, patches to one
+ // of the following states depending on the operands types.
+ HEAP_NUMBERS, // Both arguments are HeapNumbers.
+ STRINGS, // At least one of the arguments is String.
+ GENERIC // Non-specialized case (processes any type combination).
+ };
+
+ BinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+
+ void patch(Code* code);
+
+ static void Clear(Address address, Code* target);
+
+ static const char* GetName(TypeInfo type_info);
+
+ static State ToState(TypeInfo type_info);
+
+ static TypeInfo GetTypeInfo(Object* left, Object* right);
+};
+
} } // namespace v8::internal
#endif // V8_IC_H_
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index b99a89e1b1..46d53c215e 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -29,6 +29,7 @@
#define V8_JSREGEXP_H_
#include "macro-assembler.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/jump-target-inl.h b/deps/v8/src/jump-target-inl.h
index 12fabc329d..6db0081536 100644
--- a/deps/v8/src/jump-target-inl.h
+++ b/deps/v8/src/jump-target-inl.h
@@ -46,7 +46,7 @@ void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
entry_frame_->elements_[target->index()].set_copied();
}
if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
- element->set_number_info(NumberInfo::kUnknown);
+ element->set_number_info(NumberInfo::Unknown());
}
}
diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/jump-target.cc
index ab09870b95..7b1ced7eb3 100644
--- a/deps/v8/src/jump-target.cc
+++ b/deps/v8/src/jump-target.cc
@@ -135,7 +135,7 @@ void JumpTarget::ComputeEntryFrame() {
FrameElement* target = elements[index];
if (target == NULL) {
entry_frame_->elements_.Add(
- FrameElement::MemoryElement(NumberInfo::kUninitialized));
+ FrameElement::MemoryElement(NumberInfo::Uninitialized()));
} else {
entry_frame_->elements_.Add(*target);
InitializeEntryElement(index, target);
@@ -152,12 +152,12 @@ void JumpTarget::ComputeEntryFrame() {
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
- NumberInfo::Type info = NumberInfo::kUninitialized;
+ NumberInfo info = NumberInfo::Uninitialized();
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
if (direction_ == BIDIRECTIONAL) {
- info = NumberInfo::kUnknown;
+ info = NumberInfo::Unknown();
} else if (!element.is_copy()) {
info = NumberInfo::Combine(info, element.number_info());
} else {
@@ -181,7 +181,7 @@ void JumpTarget::ComputeEntryFrame() {
// We must have a number type information now (not for copied elements).
ASSERT(entry_frame_->elements_[i].is_copy()
- || info != NumberInfo::kUninitialized);
+ || !info.IsUninitialized());
// If the value is synced on all frames, put it in memory. This
// costs nothing at the merge code but will incur a
@@ -211,7 +211,7 @@ void JumpTarget::ComputeEntryFrame() {
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
- NumberInfo::kUninitialized);
+ NumberInfo::Uninitialized());
if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->set_register_location(reg, i);
}
@@ -225,8 +225,7 @@ void JumpTarget::ComputeEntryFrame() {
if (direction_ == BIDIRECTIONAL) {
for (int i = 0; i < length; ++i) {
if (!entry_frame_->elements_[i].is_copy()) {
- ASSERT(entry_frame_->elements_[i].number_info() ==
- NumberInfo::kUnknown);
+ ASSERT(entry_frame_->elements_[i].number_info().IsUnknown());
}
}
}
diff --git a/deps/v8/src/jump-target.h b/deps/v8/src/jump-target.h
index dd291c6b38..db7c115538 100644
--- a/deps/v8/src/jump-target.h
+++ b/deps/v8/src/jump-target.h
@@ -29,6 +29,7 @@
#define V8_JUMP_TARGET_H_
#include "macro-assembler.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/liveedit-delay.js b/deps/v8/src/liveedit-delay.js
new file mode 100644
index 0000000000..12479b13c8
--- /dev/null
+++ b/deps/v8/src/liveedit-delay.js
@@ -0,0 +1,426 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// LiveEdit feature implementation. The script should be executed after
+// debug-delay.js.
+
+
+// Changes script text and recompiles all relevant functions if possible.
+// The change is always a substring (change_pos, change_pos + change_len)
+// being replaced with a completely different string new_str.
+//
+// Only one function will have its Code changed in result of this function.
+// All nested functions (should they have any instances at the moment) are left
+// unchanged and re-linked to a newly created script instance representing old
+// version of the source. (Generally speaking,
+// during the change all nested functions are erased and completely different
+// set of nested functions are introduced.) All other functions just have
+// their positions updated.
+//
+// @param {Script} script that is being changed
+// @param {Array} change_log a list that collects engineer-readable description
+// of what happened.
+Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
+ change_log) {
+
+ // So far the function works as namespace.
+ var liveedit = Debug.LiveEditChangeScript;
+ var Assert = liveedit.Assert;
+
+ // Fully compiles source string as a script. Returns Array of
+ // FunctionCompileInfo -- a descriptions of all functions of the script.
+ // Elements of array are ordered by start positions of functions (from top
+ // to bottom) in the source. Fields outer_index and next_sibling_index help
+ // to navigate the nesting structure of functions.
+ //
+ // The script is used for compilation, because it produces code that
+ // needs to be linked with some particular script (for nested functions).
+ function DebugGatherCompileInfo(source) {
+ // Get function info, elements are partially sorted (it is a tree
+ // of nested functions serialized as parent followed by serialized children.
+ var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
+
+ // Sort function infos by start position field.
+ var compile_info = new Array();
+ var old_index_map = new Array();
+ for (var i = 0; i < raw_compile_info.length; i++) {
+ compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i]));
+ old_index_map.push(i);
+ }
+
+ for (var i = 0; i < compile_info.length; i++) {
+ var k = i;
+ for (var j = i + 1; j < compile_info.length; j++) {
+ if (compile_info[k].start_position > compile_info[j].start_position) {
+ k = j;
+ }
+ }
+ if (k != i) {
+ var temp_info = compile_info[k];
+ var temp_index = old_index_map[k];
+ compile_info[k] = compile_info[i];
+ old_index_map[k] = old_index_map[i];
+ compile_info[i] = temp_info;
+ old_index_map[i] = temp_index;
+ }
+ }
+
+ // After sorting update outer_inder field using old_index_map. Also
+ // set next_sibling_index field.
+ var current_index = 0;
+
+ // The recursive function, that goes over all children of a particular
+ // node (i.e. function info).
+ function ResetIndexes(new_parent_index, old_parent_index) {
+ var previous_sibling = -1;
+ while (current_index < compile_info.length &&
+ compile_info[current_index].outer_index == old_parent_index) {
+ var saved_index = current_index;
+ compile_info[saved_index].outer_index = new_parent_index;
+ if (previous_sibling != -1) {
+ compile_info[previous_sibling].next_sibling_index = saved_index;
+ }
+ previous_sibling = saved_index;
+ current_index++;
+ ResetIndexes(saved_index, old_index_map[saved_index]);
+ }
+ if (previous_sibling != -1) {
+ compile_info[previous_sibling].next_sibling_index = -1;
+ }
+ }
+
+ ResetIndexes(-1, -1);
+ Assert(current_index == compile_info.length);
+
+ return compile_info;
+ }
+
+ // Given a positions, finds a function that fully includes the entire change.
+ function FindChangedFunction(compile_info, offset, len) {
+ // First condition: function should start before the change region.
+ // Function #0 (whole-script function) always does, but we want
+ // one, that is later in this list.
+ var index = 0;
+ while (index + 1 < compile_info.length &&
+ compile_info[index + 1].start_position <= offset) {
+ index++;
+ }
+ // Now we are at the last function that begins before the change
+ // region. The function that covers entire change region is either
+ // this function or the enclosing one.
+ for (; compile_info[index].end_position < offset + len;
+ index = compile_info[index].outer_index) {
+ Assert(index != -1);
+ }
+ return index;
+ }
+
+ // Variable forward declarations. Preprocessor "Minifier" needs them.
+ var old_compile_info;
+ var shared_infos;
+ // Finds SharedFunctionInfo that corresponds compile info with index
+ // in old version of the script.
+ function FindFunctionInfo(index) {
+ var old_info = old_compile_info[index];
+ for (var i = 0; i < shared_infos.length; i++) {
+ var info = shared_infos[i];
+ if (info.start_position == old_info.start_position &&
+ info.end_position == old_info.end_position) {
+ return info;
+ }
+ }
+ }
+
+ // Replaces function's Code.
+ function PatchCode(new_info, shared_info) {
+ %LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
+
+ change_log.push( {function_patched: new_info.function_name} );
+ }
+
+ var change_len_old;
+ var change_len_new;
+ // Translate position in old version of script into position in new
+ // version of script.
+ function PosTranslator(old_pos) {
+ if (old_pos <= change_pos) {
+ return old_pos;
+ }
+ if (old_pos >= change_pos + change_len_old) {
+ return old_pos + change_len_new - change_len_old;
+ }
+ return -1;
+ }
+
+ var position_change_array;
+ var position_patch_report;
+ function PatchPositions(new_info, shared_info) {
+ if (!shared_info) {
+ // TODO: explain what is happening.
+ return;
+ }
+ %LiveEditPatchFunctionPositions(shared_info.raw_array,
+ position_change_array);
+ position_patch_report.push( { name: new_info.function_name } );
+ }
+
+ var link_to_old_script_report;
+ var old_script;
+ // Makes a function associated with another instance of a script (the
+ // one representing its old version). This way the function still
+ // may access its own text.
+ function LinkToOldScript(shared_info) {
+ %LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
+
+ link_to_old_script_report.push( { name: shared_info.function_name } );
+ }
+
+
+
+ var old_source = script.source;
+ var change_len_old = change_len;
+ var change_len_new = new_str.length;
+
+ // Prepare new source string.
+ var new_source = old_source.substring(0, change_pos) +
+ new_str + old_source.substring(change_pos + change_len);
+
+ // Find all SharedFunctionInfo's that are compiled from this script.
+ var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
+
+ var shared_infos = new Array();
+
+ for (var i = 0; i < shared_raw_list.length; i++) {
+ shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i]));
+ }
+
+ // Gather compile information about old version of script.
+ var old_compile_info = DebugGatherCompileInfo(old_source);
+
+ // Gather compile information about new version of script.
+ var new_compile_info;
+ try {
+ new_compile_info = DebugGatherCompileInfo(new_source);
+ } catch (e) {
+ throw new liveedit.Failure("Failed to compile new version of script: " + e);
+ }
+
+ // An index of a single function, that is going to have its code replaced.
+ var function_being_patched =
+ FindChangedFunction(old_compile_info, change_pos, change_len_old);
+
+ // In old and new script versions function with a change should have the
+ // same indexes.
+ var function_being_patched2 =
+ FindChangedFunction(new_compile_info, change_pos, change_len_new);
+ Assert(function_being_patched == function_being_patched2,
+ "inconsistent old/new compile info");
+
+ // Check that function being patched has the same expectations in a new
+ // version. Otherwise we cannot safely patch its behavior and should
+ // choose the outer function instead.
+ while (!liveedit.CompareFunctionExpectations(
+ old_compile_info[function_being_patched],
+ new_compile_info[function_being_patched])) {
+
+ Assert(old_compile_info[function_being_patched].outer_index ==
+ new_compile_info[function_being_patched].outer_index);
+ function_being_patched =
+ old_compile_info[function_being_patched].outer_index;
+ Assert(function_being_patched != -1);
+ }
+
+ // Check that function being patched is not currently on stack.
+ liveedit.CheckStackActivations(
+ [ FindFunctionInfo(function_being_patched) ], change_log );
+
+
+ // Committing all changes.
+ var old_script_name = liveedit.CreateNameForOldScript(script);
+
+ // Update the script text and create a new script representing an old
+ // version of the script.
+ var old_script = %LiveEditReplaceScript(script, new_source, old_script_name);
+
+ PatchCode(new_compile_info[function_being_patched],
+ FindFunctionInfo(function_being_patched));
+
+ var position_patch_report = new Array();
+ change_log.push( {position_patched: position_patch_report} );
+
+ var position_change_array = [ change_pos,
+ change_pos + change_len_old,
+ change_pos + change_len_new ];
+
+ // Update positions of all outer functions (i.e. all functions, that
+ // are partially below the function being patched).
+ for (var i = new_compile_info[function_being_patched].outer_index;
+ i != -1;
+ i = new_compile_info[i].outer_index) {
+ PatchPositions(new_compile_info[i], FindFunctionInfo(i));
+ }
+
+ // Update positions of all functions that are fully below the function
+ // being patched.
+ var old_next_sibling =
+ old_compile_info[function_being_patched].next_sibling_index;
+ var new_next_sibling =
+ new_compile_info[function_being_patched].next_sibling_index;
+
+ // We simply go over the tail of both old and new lists. Their tails should
+ // have an identical structure.
+ if (old_next_sibling == -1) {
+ Assert(new_next_sibling == -1);
+ } else {
+ Assert(old_compile_info.length - old_next_sibling ==
+ new_compile_info.length - new_next_sibling);
+
+ for (var i = old_next_sibling, j = new_next_sibling;
+ i < old_compile_info.length; i++, j++) {
+ PatchPositions(new_compile_info[j], FindFunctionInfo(i));
+ }
+ }
+
+ var link_to_old_script_report = new Array();
+ change_log.push( { linked_to_old_script: link_to_old_script_report } );
+
+ // We need to link to old script all former nested functions.
+ for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
+ LinkToOldScript(FindFunctionInfo(i), old_script);
+ }
+}
+
+Debug.LiveEditChangeScript.Assert = function(condition, message) {
+ if (!condition) {
+ if (message) {
+ throw "Assert " + message;
+ } else {
+ throw "Assert";
+ }
+ }
+}
+
+// An object describing function compilation details. Its index fields
+// apply to indexes inside array that stores these objects.
+Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
+ this.function_name = raw_array[0];
+ this.start_position = raw_array[1];
+ this.end_position = raw_array[2];
+ this.param_num = raw_array[3];
+ this.code = raw_array[4];
+ this.scope_info = raw_array[5];
+ this.outer_index = raw_array[6];
+ this.next_sibling_index = null;
+ this.raw_array = raw_array;
+}
+
+// A structure describing SharedFunctionInfo.
+Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) {
+ this.function_name = raw_array[0];
+ this.start_position = raw_array[1];
+ this.end_position = raw_array[2];
+ this.info = raw_array[3];
+ this.raw_array = raw_array;
+}
+
+// Adds a suffix to script name to mark that it is old version.
+Debug.LiveEditChangeScript.CreateNameForOldScript = function(script) {
+ // TODO(635): try better than this; support several changes.
+ return script.name + " (old)";
+}
+
+// Compares a function interface old and new version, whether it
+// changed or not.
+Debug.LiveEditChangeScript.CompareFunctionExpectations =
+ function(function_info1, function_info2) {
+ // Check that function has the same number of parameters (there may exist
+ // an adapter, that won't survive function parameter number change).
+ if (function_info1.param_num != function_info2.param_num) {
+ return false;
+ }
+ var scope_info1 = function_info1.scope_info;
+ var scope_info2 = function_info2.scope_info;
+
+ if (!scope_info1) {
+ return !scope_info2;
+ }
+
+ if (scope_info1.length != scope_info2.length) {
+ return false;
+ }
+
+ // Check that outer scope structure is not changed. Otherwise the function
+ // will not properly work with existing scopes.
+ return scope_info1.toString() == scope_info2.toString();
+}
+
+// For array of wrapped shared function infos checks that none of them
+// have activations on stack (of any thread). Throws a Failure exception
+// if this proves to be false.
+Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
+ change_log) {
+ var liveedit = Debug.LiveEditChangeScript;
+
+ var shared_list = new Array();
+ for (var i = 0; i < shared_wrapper_list.length; i++) {
+ shared_list[i] = shared_wrapper_list[i].info;
+ }
+ var result = %LiveEditCheckStackActivations(shared_list);
+ var problems = new Array();
+ for (var i = 0; i < shared_list.length; i++) {
+ if (result[i] == liveedit.FunctionPatchabilityStatus.FUNCTION_BLOCKED_ON_STACK) {
+ var shared = shared_list[i];
+ var description = {
+ name: shared.function_name,
+ start_pos: shared.start_position,
+ end_pos: shared.end_position
+ };
+ problems.push(description);
+ }
+ }
+ if (problems.length > 0) {
+ change_log.push( { functions_on_stack: problems } );
+ throw new liveedit.Failure("Blocked by functions on stack");
+ }
+}
+
+// A copy of the FunctionPatchabilityStatus enum from liveedit.h
+Debug.LiveEditChangeScript.FunctionPatchabilityStatus = {
+ FUNCTION_AVAILABLE_FOR_PATCH: 0,
+ FUNCTION_BLOCKED_ON_STACK: 1
+}
+
+
+// A logical failure in liveedit process. This means that change_log
+// is valid and consistent description of what happened.
+Debug.LiveEditChangeScript.Failure = function(message) {
+ this.message = message;
+}
+
+Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
+ return "LiveEdit Failure: " + this.message;
+}
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index c50e007f93..513cc0233f 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -39,49 +39,445 @@ namespace v8 {
namespace internal {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+static void CompileScriptForTracker(Handle<Script> script) {
+ const bool is_eval = false;
+ const bool is_global = true;
+ // TODO(635): support extensions.
+ Extension* extension = NULL;
+
+ PostponeInterruptsScope postpone;
+
+ // Only allow non-global compiles for eval.
+ ASSERT(is_eval || is_global);
+
+ // Build AST.
+ ScriptDataImpl* pre_data = NULL;
+ FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
+
+ // Check for parse errors.
+ if (lit == NULL) {
+ ASSERT(Top::has_pending_exception());
+ return;
+ }
+
+ // Compile the code.
+ CompilationInfo info(lit, script, is_eval);
+ Handle<Code> code = MakeCodeForLiveEdit(&info);
+
+ // Check for stack-overflow exceptions.
+ if (code.is_null()) {
+ Top::StackOverflow();
+ return;
+ }
+}
+
+// Unwraps JSValue object, returning its field "value"
+static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
+ return Handle<Object>(jsValue->value());
+}
+
+// Wraps any object into a OpaqueReference, that will hide the object
+// from JavaScript.
+static Handle<JSValue> WrapInJSValue(Object* object) {
+ Handle<JSFunction> constructor = Top::opaque_reference_function();
+ Handle<JSValue> result =
+ Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+ result->set_value(object);
+ return result;
+}
+
+// Simple helper class that creates more or less typed structures over
+// JSArray object. This is an adhoc method of passing structures from C++
+// to JavaScript.
+template<typename S>
+class JSArrayBasedStruct {
+ public:
+ static S Create() {
+ Handle<JSArray> array = Factory::NewJSArray(S::kSize_);
+ return S(array);
+ }
+ static S cast(Object* object) {
+ JSArray* array = JSArray::cast(object);
+ Handle<JSArray> array_handle(array);
+ return S(array_handle);
+ }
+ explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
+ }
+ Handle<JSArray> GetJSArray() {
+ return array_;
+ }
+ protected:
+ void SetField(int field_position, Handle<Object> value) {
+ SetElement(array_, field_position, value);
+ }
+ void SetSmiValueField(int field_position, int value) {
+ SetElement(array_, field_position, Handle<Smi>(Smi::FromInt(value)));
+ }
+ Object* GetField(int field_position) {
+ return array_->GetElement(field_position);
+ }
+ int GetSmiValueField(int field_position) {
+ Object* res = GetField(field_position);
+ return Smi::cast(res)->value();
+ }
+ private:
+ Handle<JSArray> array_;
+};
+
+
+// Represents some function compilation details. This structure will be used
+// from JavaScript. It contains Code object, which is kept wrapped
+// into a BlindReference for sanitizing reasons.
+class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
+ public:
+ explicit FunctionInfoWrapper(Handle<JSArray> array)
+ : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
+ }
+ void SetInitialProperties(Handle<String> name, int start_position,
+ int end_position, int param_num, int parent_index) {
+ HandleScope scope;
+ this->SetField(kFunctionNameOffset_, name);
+ this->SetSmiValueField(kStartPositionOffset_, start_position);
+ this->SetSmiValueField(kEndPositionOffset_, end_position);
+ this->SetSmiValueField(kParamNumOffset_, param_num);
+ this->SetSmiValueField(kParentIndexOffset_, parent_index);
+ }
+ void SetFunctionCode(Handle<Code> function_code) {
+ Handle<JSValue> wrapper = WrapInJSValue(*function_code);
+ this->SetField(kCodeOffset_, wrapper);
+ }
+ void SetScopeInfo(Handle<JSArray> scope_info_array) {
+ this->SetField(kScopeInfoOffset_, scope_info_array);
+ }
+ int GetParentIndex() {
+ return this->GetSmiValueField(kParentIndexOffset_);
+ }
+ Handle<Code> GetFunctionCode() {
+ Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
+ JSValue::cast(this->GetField(kCodeOffset_))));
+ return Handle<Code>::cast(raw_result);
+ }
+ int GetStartPosition() {
+ return this->GetSmiValueField(kStartPositionOffset_);
+ }
+ int GetEndPosition() {
+ return this->GetSmiValueField(kEndPositionOffset_);
+ }
+
+ private:
+ static const int kFunctionNameOffset_ = 0;
+ static const int kStartPositionOffset_ = 1;
+ static const int kEndPositionOffset_ = 2;
+ static const int kParamNumOffset_ = 3;
+ static const int kCodeOffset_ = 4;
+ static const int kScopeInfoOffset_ = 5;
+ static const int kParentIndexOffset_ = 6;
+ static const int kSize_ = 7;
+
+ friend class JSArrayBasedStruct<FunctionInfoWrapper>;
+};
+
+// Wraps SharedFunctionInfo along with some of its fields for passing it
+// back to JavaScript. SharedFunctionInfo object itself is additionally
+// wrapped into BlindReference for sanitizing reasons.
+class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
+ public:
+ explicit SharedInfoWrapper(Handle<JSArray> array)
+ : JSArrayBasedStruct<SharedInfoWrapper>(array) {
+ }
+
+ void SetProperties(Handle<String> name, int start_position, int end_position,
+ Handle<SharedFunctionInfo> info) {
+ HandleScope scope;
+ this->SetField(kFunctionNameOffset_, name);
+ Handle<JSValue> info_holder = WrapInJSValue(*info);
+ this->SetField(kSharedInfoOffset_, info_holder);
+ this->SetSmiValueField(kStartPositionOffset_, start_position);
+ this->SetSmiValueField(kEndPositionOffset_, end_position);
+ }
+ Handle<SharedFunctionInfo> GetInfo() {
+ Object* element = this->GetField(kSharedInfoOffset_);
+ Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+ return Handle<SharedFunctionInfo>::cast(raw_result);
+ }
+
+ private:
+ static const int kFunctionNameOffset_ = 0;
+ static const int kStartPositionOffset_ = 1;
+ static const int kEndPositionOffset_ = 2;
+ static const int kSharedInfoOffset_ = 3;
+ static const int kSize_ = 4;
+
+ friend class JSArrayBasedStruct<SharedInfoWrapper>;
+};
+
class FunctionInfoListener {
public:
+ FunctionInfoListener() {
+ current_parent_index_ = -1;
+ len_ = 0;
+ result_ = Factory::NewJSArray(10);
+ }
+
void FunctionStarted(FunctionLiteral* fun) {
- // Implementation follows.
+ HandleScope scope;
+ FunctionInfoWrapper info = FunctionInfoWrapper::Create();
+ info.SetInitialProperties(fun->name(), fun->start_position(),
+ fun->end_position(), fun->num_parameters(),
+ current_parent_index_);
+ current_parent_index_ = len_;
+ SetElement(result_, len_, info.GetJSArray());
+ len_++;
}
void FunctionDone() {
- // Implementation follows.
+ HandleScope scope;
+ FunctionInfoWrapper info =
+ FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+ current_parent_index_ = info.GetParentIndex();
}
void FunctionScope(Scope* scope) {
- // Implementation follows.
+ HandleScope handle_scope;
+
+ Handle<JSArray> scope_info_list = Factory::NewJSArray(10);
+ int scope_info_length = 0;
+
+ // Saves some description of scope. It stores name and indexes of
+ // variables in the whole scope chain. Null-named slots delimit
+ // scopes of this chain.
+ Scope* outer_scope = scope->outer_scope();
+ if (outer_scope == NULL) {
+ return;
+ }
+ do {
+ ZoneList<Variable*> list(10);
+ outer_scope->CollectUsedVariables(&list);
+ int j = 0;
+ for (int i = 0; i < list.length(); i++) {
+ Variable* var1 = list[i];
+ Slot* slot = var1->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ if (j != i) {
+ list[j] = var1;
+ }
+ j++;
+ }
+ }
+
+ // Sort it.
+ for (int k = 1; k < j; k++) {
+ int l = k;
+ for (int m = k + 1; m < j; m++) {
+ if (list[l]->slot()->index() > list[m]->slot()->index()) {
+ l = m;
+ }
+ }
+ list[k] = list[l];
+ }
+ for (int i = 0; i < j; i++) {
+ SetElement(scope_info_list, scope_info_length, list[i]->name());
+ scope_info_length++;
+ SetElement(scope_info_list, scope_info_length,
+ Handle<Smi>(Smi::FromInt(list[i]->slot()->index())));
+ scope_info_length++;
+ }
+ SetElement(scope_info_list, scope_info_length,
+ Handle<Object>(Heap::null_value()));
+ scope_info_length++;
+
+ outer_scope = outer_scope->outer_scope();
+ } while (outer_scope != NULL);
+
+ FunctionInfoWrapper info =
+ FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+ info.SetScopeInfo(scope_info_list);
}
void FunctionCode(Handle<Code> function_code) {
- // Implementation follows.
+ FunctionInfoWrapper info =
+ FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+ info.SetFunctionCode(function_code);
}
+
+ Handle<JSArray> GetResult() {
+ return result_;
+ }
+
+ private:
+ Handle<JSArray> result_;
+ int len_;
+ int current_parent_index_;
};
static FunctionInfoListener* active_function_info_listener = NULL;
+JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
+ Handle<String> source) {
+ CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+
+ FunctionInfoListener listener;
+ Handle<Object> original_source = Handle<Object>(script->source());
+ script->set_source(*source);
+ active_function_info_listener = &listener;
+ CompileScriptForTracker(script);
+ active_function_info_listener = NULL;
+ script->set_source(*original_source);
+
+ return *(listener.GetResult());
+}
+
+
+void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
+ HandleScope scope;
+ int len = Smi::cast(array->length())->value();
+ for (int i = 0; i < len; i++) {
+ Handle<SharedFunctionInfo> info(
+ SharedFunctionInfo::cast(array->GetElement(i)));
+ SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
+ Handle<String> name_handle(String::cast(info->name()));
+ info_wrapper.SetProperties(name_handle, info->start_position(),
+ info->end_position(), info);
+ array->SetElement(i, *(info_wrapper.GetJSArray()));
+ }
+}
+
+
+void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+ Handle<JSArray> shared_info_array) {
+ HandleScope scope;
+
+ FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
+ SharedInfoWrapper shared_info_wrapper(shared_info_array);
+
+ Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+
+ shared_info->set_code(*(compile_info_wrapper.GetFunctionCode()),
+ UPDATE_WRITE_BARRIER);
+ shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
+ shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
+ // update breakpoints, original code, constructor stub
+}
+
+
+void LiveEdit::RelinkFunctionToScript(Handle<JSArray> shared_info_array,
+ Handle<Script> script_handle) {
+ SharedInfoWrapper shared_info_wrapper(shared_info_array);
+ Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+
+ shared_info->set_script(*script_handle);
+}
+
+
+// For a script text change (defined as position_change_array), translates
+// position in unchanged text to position in changed text.
+// Text change is a set of non-overlapping regions in text, that have changed
+// their contents and length. It is specified as array of groups of 3 numbers:
+// (change_begin, change_end, change_end_new_position).
+// Each group describes a change in text; groups are sorted by change_begin.
+// Only position in text beyond any changes may be successfully translated.
+// If a position is inside some region that changed, result is currently
+// undefined.
+static int TranslatePosition(int original_position,
+ Handle<JSArray> position_change_array) {
+ int position_diff = 0;
+ int array_len = Smi::cast(position_change_array->length())->value();
+ // TODO(635): binary search may be used here
+ for (int i = 0; i < array_len; i += 3) {
+ int chunk_start =
+ Smi::cast(position_change_array->GetElement(i))->value();
+ int chunk_end =
+ Smi::cast(position_change_array->GetElement(i + 1))->value();
+ int chunk_changed_end =
+ Smi::cast(position_change_array->GetElement(i + 2))->value();
+ position_diff = chunk_changed_end - chunk_end;
+ if (original_position < chunk_start) {
+ break;
+ }
+ // Position mustn't be inside a chunk.
+ ASSERT(original_position >= chunk_end);
+ }
+
+ return original_position + position_diff;
+}
+
+
+void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
+ Handle<JSArray> position_change_array) {
+ SharedInfoWrapper shared_info_wrapper(shared_info_array);
+ Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
+
+ info->set_start_position(TranslatePosition(info->start_position(),
+ position_change_array));
+ info->set_end_position(TranslatePosition(info->end_position(),
+ position_change_array));
+
+ // Also patch rinfos (both in working code and original code), breakpoints.
+}
+
+
LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionStarted(fun);
}
}
+
+
LiveEditFunctionTracker::~LiveEditFunctionTracker() {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionDone();
}
}
+
+
void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionCode(code);
}
}
+
+
void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionScope(scope);
}
}
+
+
bool LiveEditFunctionTracker::IsActive() {
return active_function_info_listener != NULL;
}
+
+#else // ENABLE_DEBUGGER_SUPPORT
+
+// This ifdef-else-endif section provides working or stub implementation of
+// LiveEditFunctionTracker.
+LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
+}
+
+
+LiveEditFunctionTracker::~LiveEditFunctionTracker() {
+}
+
+
+void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
+}
+
+
+void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
+}
+
+
+bool LiveEditFunctionTracker::IsActive() {
+ return false;
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 73aa7d3d3c..efbcd7404a 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -73,6 +73,34 @@ class LiveEditFunctionTracker {
static bool IsActive();
};
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+class LiveEdit : AllStatic {
+ public:
+ static JSArray* GatherCompileInfo(Handle<Script> script,
+ Handle<String> source);
+
+ static void WrapSharedFunctionInfos(Handle<JSArray> array);
+
+ static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+ Handle<JSArray> shared_info_array);
+
+ static void RelinkFunctionToScript(Handle<JSArray> shared_info_array,
+ Handle<Script> script_handle);
+
+ static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
+ Handle<JSArray> position_change_array);
+
+ // A copy of this is in liveedit-delay.js.
+ enum FunctionPatchabilityStatus {
+ FUNCTION_AVAILABLE_FOR_PATCH = 0,
+ FUNCTION_BLOCKED_ON_STACK = 1
+ };
+};
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
} } // namespace v8::internal
#endif /* V*_LIVEEDIT_H_ */
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index a3fef7310b..588d345499 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -329,7 +329,7 @@ VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
-bool Logger::is_logging_ = false;
+int Logger::logging_nesting_ = 0;
int Logger::cpu_profiler_nesting_ = 0;
int Logger::heap_profiler_nesting_ = 0;
@@ -389,12 +389,19 @@ void Logger::UncheckedStringEvent(const char* name, const char* value) {
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
+ if (FLAG_log) UncheckedIntEvent(name, value);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::UncheckedIntEvent(const char* name, int value) {
+ if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
-#endif
}
+#endif
void Logger::HandleEvent(const char* name, Object** location) {
@@ -1169,19 +1176,18 @@ void Logger::PauseProfiler(int flags, int tag) {
// Must be the same message as Log::kDynamicBufferSeal.
LOG(UncheckedStringEvent("profiler", "pause"));
}
+ --logging_nesting_;
}
}
if (flags &
(PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
if (--heap_profiler_nesting_ == 0) {
FLAG_log_gc = false;
+ --logging_nesting_;
}
}
if (tag != 0) {
- IntEvent("close-tag", tag);
- }
- if (GetActiveProfilerModules() == PROFILER_MODULE_NONE) {
- is_logging_ = false;
+ UncheckedIntEvent("close-tag", tag);
}
}
@@ -1189,11 +1195,11 @@ void Logger::PauseProfiler(int flags, int tag) {
void Logger::ResumeProfiler(int flags, int tag) {
if (!Log::IsEnabled()) return;
if (tag != 0) {
- IntEvent("open-tag", tag);
+ UncheckedIntEvent("open-tag", tag);
}
if (flags & PROFILER_MODULE_CPU) {
if (cpu_profiler_nesting_++ == 0) {
- is_logging_ = true;
+ ++logging_nesting_;
if (FLAG_prof_lazy) {
profiler_->Engage();
LOG(UncheckedStringEvent("profiler", "resume"));
@@ -1209,7 +1215,7 @@ void Logger::ResumeProfiler(int flags, int tag) {
if (flags &
(PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
if (heap_profiler_nesting_++ == 0) {
- is_logging_ = true;
+ ++logging_nesting_;
FLAG_log_gc = true;
}
}
@@ -1261,6 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
switch (code_object->kind()) {
case Code::FUNCTION:
return; // We log this later using LogCompiledFunctions.
+ case Code::BINARY_OP_IC:
+ // fall through
case Code::STUB:
description = CodeStub::MajorName(code_object->major_key(), true);
if (description == NULL)
@@ -1482,14 +1490,16 @@ bool Logger::Setup() {
compression_helper_ = new CompressionHelper(kCompressionWindowSize);
}
- is_logging_ = start_logging;
+ if (start_logging) {
+ logging_nesting_ = 1;
+ }
if (FLAG_prof) {
profiler_ = new Profiler();
if (!FLAG_prof_auto) {
profiler_->pause();
} else {
- is_logging_ = true;
+ logging_nesting_ = 1;
}
if (!FLAG_prof_lazy) {
profiler_->Engage();
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index eb8369cf00..613a1e26cf 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -265,7 +265,7 @@ class Logger {
}
static bool is_logging() {
- return is_logging_;
+ return logging_nesting_ > 0;
}
// Pause/Resume collection of profiling data.
@@ -330,6 +330,9 @@ class Logger {
// Logs a StringEvent regardless of whether FLAG_log is true.
static void UncheckedStringEvent(const char* name, const char* value);
+ // Logs an IntEvent regardless of whether FLAG_log is true.
+ static void UncheckedIntEvent(const char* name, int value);
+
// Stops logging and profiling in case of insufficient resources.
static void StopLoggingAndProfiling();
@@ -372,7 +375,7 @@ class Logger {
friend class LoggerTestHelper;
- static bool is_logging_;
+ static int logging_nesting_;
static int cpu_profiler_nesting_;
static int heap_profiler_nesting_;
#else
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index ccc2037f23..9da2552479 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -73,6 +73,16 @@ const kDayMask = 0x01f;
const kYearShift = 9;
const kMonthShift = 5;
+# Limits for parts of the date, so that we support all the dates that
+# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
+# the date (days since 1970) is in SMI range.
+const kMinYear = -1000000;
+const kMaxYear = 1000000;
+const kMinMonth = -10000000;
+const kMaxMonth = 10000000;
+const kMinDate = -100000000;
+const kMaxDate = 100000000;
+
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 5745e61798..034f32d7fc 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -84,7 +84,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %Math_cos(x);
+ return %_Math_cos(x);
}
// ECMA 262 - 15.8.2.8
@@ -159,7 +159,7 @@ function MathMin(arg1, arg2) { // length == 2
function MathPow(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
- return %Math_pow(x, y);
+ return %_Math_pow(x, y);
}
// ECMA 262 - 15.8.2.14
@@ -176,13 +176,13 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %Math_sin(x);
+ return %_Math_sin(x);
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %Math_sqrt(x);
+ return %_Math_sqrt(x);
}
// ECMA 262 - 15.8.2.18
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index e16b1b2495..7cb1d20227 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -30,6 +30,7 @@
#include "api.h"
#include "execution.h"
+#include "messages.h"
#include "spaces-inl.h"
#include "top.h"
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index ca82afe533..5848115059 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -127,6 +127,7 @@ function FormatMessage(message) {
malformed_regexp: "Invalid regular expression: /%0/: %1",
unterminated_regexp: "Invalid regular expression: missing /",
regexp_flags: "Cannot supply flags when constructing one RegExp from another",
+ incompatible_method_receiver: "Method %0 called on incompatible receiver %1",
invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
invalid_lhs_in_postfix_op: "Invalid left-hand side expression in postfix operation",
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 2de45f67f0..facccc288d 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -81,7 +81,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
// a1: called JS function
// cp: callee's context
-void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void CodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
@@ -292,6 +292,16 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
@@ -300,6 +310,11 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@@ -457,6 +472,34 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
UNIMPLEMENTED_MIPS();
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // ********** State **********
+ //
+ // * Registers:
+ // a0: entry_address
+ // a1: function
+  //    a2: receiver_pointer
+ // a3: argc
+ //
+ // * Stack:
+ // ---------------------------
+ // args
+ // ---------------------------
+ // 4 args slots
+ // ---------------------------
+ // callee saved registers + ra
+ // ---------------------------
+ //
+ // ***************************
+
+ __ break_(0x1234);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+
// Load a result.
__ li(v0, Operand(0x1234));
__ jr(ra);
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 147b8724ed..987dcca879 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -157,11 +157,10 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
- virtual ~CodeGenerator() { delete masm_; }
// Accessors.
inline bool is_eval();
- Scope* scope() const { return scope_; }
+ inline Scope* scope();
// Generating deferred code.
void ProcessDeferred();
@@ -184,7 +183,7 @@ class CodeGenerator: public AstVisitor {
#undef DEF_VISIT
// Main code generation function
- void Generate(CompilationInfo* info, Mode mode);
+ void Generate(CompilationInfo* info);
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
@@ -227,6 +226,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateCharFromCode(ZoneList<Expression*>* args);
+
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -244,6 +246,11 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast support for Math.pow().
+ void GenerateMathPow(ZoneList<Expression*>* args);
+ // Fast support for Math.sqrt().
+  void GenerateMathSqrt(ZoneList<Expression*>* args);
+
// Fast support for Math.sin and Math.cos.
inline void GenerateMathSin(ZoneList<Expression*>* args);
@@ -302,6 +309,7 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
+ friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
diff --git a/deps/v8/src/mips/fast-codegen-mips.cc b/deps/v8/src/mips/fast-codegen-mips.cc
index c47f6326d8..48a0ce6c75 100644
--- a/deps/v8/src/mips/fast-codegen-mips.cc
+++ b/deps/v8/src/mips/fast-codegen-mips.cc
@@ -35,6 +35,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+Register FastCodeGenerator::accumulator0() { return no_reg; }
+Register FastCodeGenerator::accumulator1() { return no_reg; }
+Register FastCodeGenerator::scratch0() { return no_reg; }
+Register FastCodeGenerator::scratch1() { return no_reg; }
+Register FastCodeGenerator::receiver_reg() { return no_reg; }
+Register FastCodeGenerator::context_reg() { return no_reg; }
+
+
void FastCodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
@@ -45,7 +53,17 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
UNIMPLEMENTED_MIPS();
}
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 920329eea4..3c29e99be9 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -146,6 +146,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
UNIMPLEMENTED_MIPS();
}
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 5598cdfcd1..605616626a 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -90,11 +90,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
@@ -120,11 +115,6 @@ Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
@@ -145,24 +135,23 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
}
-void KeyedStoreIC::Generate(MacroAssembler* masm,
- const ExternalReference& f) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@@ -172,12 +161,12 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
}
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
diff --git a/deps/v8/src/mips/jump-target-mips.cc b/deps/v8/src/mips/jump-target-mips.cc
index 3301d19911..e8398a849c 100644
--- a/deps/v8/src/mips/jump-target-mips.cc
+++ b/deps/v8/src/mips/jump-target-mips.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index b733bdd926..e49858b1d8 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -422,7 +422,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
// Trashes the at register if no scratch register is provided.
void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
@@ -489,7 +489,7 @@ void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -559,7 +559,7 @@ void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -634,7 +634,7 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -787,6 +787,16 @@ void MacroAssembler::Call(Label* target) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void MacroAssembler::DebugBreak() {
+ UNIMPLEMENTED_MIPS();
+ }
+#endif
+
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -826,14 +836,21 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
}
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- UNIMPLEMENTED_MIPS();
+ TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
-void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
UNIMPLEMENTED_MIPS();
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index aea98366eb..b34488cef3 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -115,11 +115,7 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- // Sets the remembered set bit for [address+offset], where address is the
- // address of the heap object 'object'. The address must be in the first 8K
- // of an allocated page. The 'scratch' register is used in the
- // implementation and all 3 registers are clobbered by the operation, as
- // well as the ip register.
+ // Sets the remembered set bit for [address+offset].
void RecordWrite(Register object, Register offset, Register scratch);
@@ -182,19 +178,8 @@ class MacroAssembler: public Assembler {
// Push multiple registers on the stack.
- // With MultiPush, lower registers are pushed first on the stack.
- // For example if you push t0, t1, s0, and ra you get:
- // | |
- // |-----------------------|
- // | t0 | +
- // |-----------------------| |
- // | t1 | |
- // |-----------------------| |
- // | s0 | v
- // |-----------------------| -
- // | ra |
- // |-----------------------|
- // | |
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
void Push(Register src) {
@@ -224,6 +209,20 @@ class MacroAssembler: public Assembler {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void SaveRegistersToMemory(RegList regs);
+ void RestoreRegistersFromMemory(RegList regs);
+ void CopyRegistersFromMemoryToStack(Register base, RegList regs);
+ void CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs);
+ void DebugBreak();
+#endif
+
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -268,21 +267,25 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- // Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToRuntime, but also takes care of passing the number
+ // Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
- void TailCallRuntime(const ExternalReference& ext,
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Jump to the builtin routine.
- void JumpToRuntime(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index a87a49b736..669fdaa3ce 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -72,20 +72,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x249);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -99,7 +85,6 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@@ -120,18 +105,6 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- return at; // UNIMPLEMENTED RETURN
-}
-
-
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
@@ -192,7 +165,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name) {
@@ -211,7 +184,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
diff --git a/deps/v8/src/mips/virtual-frame-mips.cc b/deps/v8/src/mips/virtual-frame-mips.cc
index fad7ec4c7c..e89882f2ae 100644
--- a/deps/v8/src/mips/virtual-frame-mips.cc
+++ b/deps/v8/src/mips/virtual-frame-mips.cc
@@ -32,6 +32,7 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
+#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
@@ -41,17 +42,6 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-
-// On entry to a function, the virtual frame already contains the
-// receiver and the parameters. All initial frame elements are in
-// memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count()) { // 0-based index of TOS.
- UNIMPLEMENTED_MIPS();
-}
-
-
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
}
diff --git a/deps/v8/src/mips/virtual-frame-mips.h b/deps/v8/src/mips/virtual-frame-mips.h
index 79f973fb64..e5bc93fa28 100644
--- a/deps/v8/src/mips/virtual-frame-mips.h
+++ b/deps/v8/src/mips/virtual-frame-mips.h
@@ -61,16 +61,17 @@ class VirtualFrame : public ZoneObject {
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- VirtualFrame();
+ inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
- explicit VirtualFrame(VirtualFrame* original);
+ explicit inline VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index);
+ FrameElement CopyElementAt(int index,
+ NumberInfo::Type info = NumberInfo::kUnknown);
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -366,9 +367,9 @@ class VirtualFrame : public ZoneObject {
void EmitMultiPushReversed(RegList regs); // higher first
// Push an element on the virtual frame.
- void Push(Register reg);
- void Push(Handle<Object> value);
- void Push(Smi* value) { Push(Handle<Object>(value)); }
+ inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
+ inline void Push(Handle<Object> value);
+ inline void Push(Smi* value);
// Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) {
@@ -384,7 +385,7 @@ class VirtualFrame : public ZoneObject {
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- void Nip(int num_dropped);
+ inline void Nip(int num_dropped);
// This pushes 4 arguments slots on the stack and saves asked 'a' registers
// 'a' registers are arguments register a0 to a3.
@@ -483,7 +484,7 @@ class VirtualFrame : public ZoneObject {
// Push a copy of a frame slot (typically a local or parameter) on top of
// the frame.
- void PushFrameSlotAt(int index);
+ inline void PushFrameSlotAt(int index);
// Push a the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
@@ -534,7 +535,7 @@ class VirtualFrame : public ZoneObject {
// (via PrepareForCall).
void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
- bool Equals(VirtualFrame* other);
+ inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
diff --git a/deps/v8/src/number-info.h b/deps/v8/src/number-info.h
index c6f32e47c1..bfc0d9fd2d 100644
--- a/deps/v8/src/number-info.h
+++ b/deps/v8/src/number-info.h
@@ -31,42 +31,160 @@
namespace v8 {
namespace internal {
-class NumberInfo : public AllStatic {
+// Unknown
+// |
+// Number
+// / |
+// HeapNumber Integer32
+// | |
+// | Smi
+// | /
+// Uninitialized.
+
+class NumberInfo {
public:
- enum Type {
- kUnknown = 0,
- kNumber = 1,
- kSmi = 3,
- kHeapNumber = 5,
- kUninitialized = 7
- };
+ NumberInfo() { }
+
+ static inline NumberInfo Unknown();
+ // We know it's a number of some sort.
+ static inline NumberInfo Number();
+ // We know it's signed or unsigned 32 bit integer.
+ static inline NumberInfo Integer32();
+ // We know it's a Smi.
+ static inline NumberInfo Smi();
+ // We know it's a heap number.
+ static inline NumberInfo HeapNumber();
+ // We haven't started collecting info yet.
+ static inline NumberInfo Uninitialized();
+
+ // Return compact representation. Very sensitive to enum values below!
+ int ThreeBitRepresentation() {
+ ASSERT(type_ != kUninitializedType);
+ int answer = type_ > 6 ? type_ -2 : type_;
+ ASSERT(answer >= 0);
+ ASSERT(answer <= 7);
+ return answer;
+ }
+
+ // Decode compact representation. Very sensitive to enum values below!
+ static NumberInfo ExpandedRepresentation(int three_bit_representation) {
+ Type t = static_cast<Type>(three_bit_representation >= 6 ?
+ three_bit_representation + 2 :
+ three_bit_representation);
+ ASSERT(t == kUnknownType ||
+ t == kNumberType ||
+ t == kInteger32Type ||
+ t == kSmiType ||
+ t == kHeapNumberType);
+ return NumberInfo(t);
+ }
+
+ int ToInt() {
+ return type_;
+ }
+
+ static NumberInfo FromInt(int bit_representation) {
+ Type t = static_cast<Type>(bit_representation);
+ ASSERT(t == kUnknownType ||
+ t == kNumberType ||
+ t == kInteger32Type ||
+ t == kSmiType ||
+ t == kHeapNumberType);
+ return NumberInfo(t);
+ }
// Return the weakest (least precise) common type.
- static Type Combine(Type a, Type b) {
- // Make use of the order of enum values.
- return static_cast<Type>(a & b);
+ static NumberInfo Combine(NumberInfo a, NumberInfo b) {
+ return NumberInfo(static_cast<Type>(a.type_ & b.type_));
+ }
+
+ inline bool IsUnknown() {
+ return type_ == kUnknownType;
+ }
+
+ inline bool IsNumber() {
+ ASSERT(type_ != kUninitializedType);
+ return ((type_ & kNumberType) == kNumberType);
+ }
+
+ inline bool IsSmi() {
+ ASSERT(type_ != kUninitializedType);
+ return ((type_ & kSmiType) == kSmiType);
+ }
+
+ inline bool IsInteger32() {
+ ASSERT(type_ != kUninitializedType);
+ return ((type_ & kInteger32Type) == kInteger32Type);
+ }
+
+ inline bool IsHeapNumber() {
+ ASSERT(type_ != kUninitializedType);
+ return ((type_ & kHeapNumberType) == kHeapNumberType);
}
- static bool IsNumber(Type a) {
- ASSERT(a != kUninitialized);
- return ((a & kNumber) != 0);
+ inline bool IsUninitialized() {
+ return type_ == kUninitializedType;
}
- static const char* ToString(Type a) {
- switch (a) {
- case kUnknown: return "UnknownType";
- case kNumber: return "NumberType";
- case kSmi: return "SmiType";
- case kHeapNumber: return "HeapNumberType";
- case kUninitialized:
+ const char* ToString() {
+ switch (type_) {
+ case kUnknownType: return "UnknownType";
+ case kNumberType: return "NumberType";
+ case kSmiType: return "SmiType";
+ case kHeapNumberType: return "HeapNumberType";
+ case kInteger32Type: return "Integer32Type";
+ case kUninitializedType:
UNREACHABLE();
return "UninitializedType";
}
UNREACHABLE();
return "Unreachable code";
}
+
+ private:
+ enum Type {
+ kUnknownType = 0,
+ kNumberType = 1,
+ kInteger32Type = 3,
+ kSmiType = 7,
+ kHeapNumberType = 9,
+ kUninitializedType = 15
+ };
+ explicit inline NumberInfo(Type t) : type_(t) { }
+
+ Type type_;
};
+
+NumberInfo NumberInfo::Unknown() {
+ return NumberInfo(kUnknownType);
+}
+
+
+NumberInfo NumberInfo::Number() {
+ return NumberInfo(kNumberType);
+}
+
+
+NumberInfo NumberInfo::Integer32() {
+ return NumberInfo(kInteger32Type);
+}
+
+
+NumberInfo NumberInfo::Smi() {
+ return NumberInfo(kSmiType);
+}
+
+
+NumberInfo NumberInfo::HeapNumber() {
+ return NumberInfo(kHeapNumberType);
+}
+
+
+NumberInfo NumberInfo::Uninitialized() {
+ return NumberInfo(kUninitializedType);
+}
+
} } // namespace v8::internal
#endif // V8_NUMBER_INFO_H_
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 9415bc1a1c..492b492584 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -644,6 +644,24 @@ void Map::MapVerify() {
}
+void CodeCache::CodeCachePrint() {
+ HeapObject::PrintHeader("CodeCache");
+ PrintF("\n - default_cache: ");
+ default_cache()->ShortPrint();
+ PrintF("\n - normal_type_cache: ");
+ normal_type_cache()->ShortPrint();
+}
+
+
+void CodeCache::CodeCacheVerify() {
+ VerifyHeapPointer(default_cache());
+ VerifyHeapPointer(normal_type_cache());
+ ASSERT(default_cache()->IsFixedArray());
+ ASSERT(normal_type_cache()->IsUndefined()
+ || normal_type_cache()->IsCodeCacheHashTable());
+}
+
+
void FixedArray::FixedArrayPrint() {
HeapObject::PrintHeader("FixedArray");
PrintF(" - length: %d", length());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 455a84c8d0..18f45f3ba8 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -564,6 +564,11 @@ bool Object::IsCompilationCacheTable() {
}
+bool Object::IsCodeCacheHashTable() {
+ return IsHashTable();
+}
+
+
bool Object::IsMapCache() {
return IsHashTable();
}
@@ -840,15 +845,17 @@ Failure* Failure::OutOfMemoryException() {
intptr_t Failure::value() const {
- return reinterpret_cast<intptr_t>(this) >> kFailureTagSize;
+ return static_cast<intptr_t>(
+ reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
}
Failure* Failure::RetryAfterGC(int requested_bytes) {
// Assert that the space encoding fits in the three bytes allotted for it.
ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
- intptr_t requested = requested_bytes >> kObjectAlignmentBits;
- int tag_bits = kSpaceTagSize + kFailureTypeTagSize;
+ uintptr_t requested =
+ static_cast<uintptr_t>(requested_bytes >> kObjectAlignmentBits);
+ int tag_bits = kSpaceTagSize + kFailureTypeTagSize + kFailureTagSize;
if (((requested << tag_bits) >> tag_bits) != requested) {
// No room for entire requested size in the bits. Round down to
// maximally representable size.
@@ -861,7 +868,8 @@ Failure* Failure::RetryAfterGC(int requested_bytes) {
Failure* Failure::Construct(Type type, intptr_t value) {
- intptr_t info = (static_cast<intptr_t>(value) << kFailureTypeTagSize) | type;
+ uintptr_t info =
+ (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
}
@@ -1394,6 +1402,11 @@ void FixedArray::set_the_hole(int index) {
}
+Object** FixedArray::data_start() {
+ return HeapObject::RawField(this, kHeaderSize);
+}
+
+
bool DescriptorArray::IsEmpty() {
ASSERT(this == Heap::empty_descriptor_array() ||
this->length() > 2);
@@ -1560,6 +1573,7 @@ CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
@@ -1637,13 +1651,11 @@ bool String::Equals(String* other) {
}
-Object* String::TryFlattenIfNotFlat() {
+Object* String::TryFlatten(PretenureFlag pretenure) {
// We don't need to flatten strings that are already flat. Since this code
// is inlined, it can be helpful in the flat case to not call out to Flatten.
- if (!IsFlat()) {
- return TryFlatten();
- }
- return this;
+ if (IsFlat()) return this;
+ return SlowTryFlatten(pretenure);
}
@@ -2143,14 +2155,14 @@ int Code::arguments_count() {
CodeStub::Major Code::major_key() {
- ASSERT(kind() == STUB);
+ ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
kStubMajorKeyOffset));
}
void Code::set_major_key(CodeStub::Major major) {
- ASSERT(kind() == STUB);
+ ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
ASSERT(0 <= major && major < 256);
WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
}
@@ -2252,7 +2264,7 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset)
-ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
+ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
@@ -2390,6 +2402,9 @@ INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
+ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
+ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
+
bool Script::HasValidSource() {
Object* src = this->source();
if (!src->IsString()) return true;
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 53423af523..7f4ab0b6c0 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -673,7 +673,7 @@ static bool AnWord(String* str) {
}
-Object* String::TryFlatten() {
+Object* String::SlowTryFlatten(PretenureFlag pretenure) {
#ifdef DEBUG
// Do not attempt to flatten in debug mode when allocation is not
// allowed. This is to avoid an assertion failure when allocating.
@@ -691,7 +691,7 @@ Object* String::TryFlatten() {
// There's little point in putting the flat string in new space if the
// cons string is in old space. It can never get GCed until there is
// an old space GC.
- PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED : TENURED;
+ PretenureFlag tenure = Heap::InNewSpace(this) ? pretenure : TENURED;
int len = length();
Object* object;
String* result;
@@ -2180,7 +2180,7 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
property_count += 2; // Make space for two more properties.
}
Object* obj =
- StringDictionary::Allocate(property_count * 2);
+ StringDictionary::Allocate(property_count);
if (obj->IsFailure()) return obj;
StringDictionary* dictionary = StringDictionary::cast(obj);
@@ -2768,7 +2768,7 @@ Object* JSObject::DefineGetterSetter(String* name,
}
// Try to flatten before operating on the string.
- name->TryFlattenIfNotFlat();
+ name->TryFlatten();
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or it's prototype chain.
@@ -3033,19 +3033,79 @@ Object* Map::CopyDropTransitions() {
Object* Map::UpdateCodeCache(String* name, Code* code) {
+ // Allocate the code cache if not present.
+ if (code_cache()->IsFixedArray()) {
+ Object* result = Heap::AllocateCodeCache();
+ if (result->IsFailure()) return result;
+ set_code_cache(result);
+ }
+
+ // Update the code cache.
+ return CodeCache::cast(code_cache())->Update(name, code);
+}
+
+
+Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
+ // Do a lookup if a code cache exists.
+ if (!code_cache()->IsFixedArray()) {
+ return CodeCache::cast(code_cache())->Lookup(name, flags);
+ } else {
+ return Heap::undefined_value();
+ }
+}
+
+
+int Map::IndexInCodeCache(String* name, Code* code) {
+ // Get the internal index if a code cache exists.
+ if (!code_cache()->IsFixedArray()) {
+ return CodeCache::cast(code_cache())->GetIndex(name, code);
+ }
+ return -1;
+}
+
+
+void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
+ // No GC is supposed to happen between a call to IndexInCodeCache and
+ // RemoveFromCodeCache so the code cache must be there.
+ ASSERT(!code_cache()->IsFixedArray());
+ CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
+}
+
+
+Object* CodeCache::Update(String* name, Code* code) {
ASSERT(code->ic_state() == MONOMORPHIC);
- FixedArray* cache = code_cache();
- // When updating the code cache we disregard the type encoded in the
+ // The number of monomorphic stubs for normal load/store/call IC's can grow to
+ // a large number and therefore they need to go into a hash table. They are
+ // used to load global properties from cells.
+ if (code->type() == NORMAL) {
+ // Make sure that a hash table is allocated for the normal load code cache.
+ if (normal_type_cache()->IsUndefined()) {
+ Object* result =
+ CodeCacheHashTable::Allocate(CodeCacheHashTable::kInitialSize);
+ if (result->IsFailure()) return result;
+ set_normal_type_cache(result);
+ }
+ return UpdateNormalTypeCache(name, code);
+ } else {
+ ASSERT(default_cache()->IsFixedArray());
+ return UpdateDefaultCache(name, code);
+ }
+}
+
+
+Object* CodeCache::UpdateDefaultCache(String* name, Code* code) {
+ // When updating the default code cache we disregard the type encoded in the
// flags. This allows call constant stubs to overwrite call field
// stubs, etc.
Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
// First check whether we can update existing code cache without
// extending it.
+ FixedArray* cache = default_cache();
int length = cache->length();
int deleted_index = -1;
- for (int i = 0; i < length; i += 2) {
+ for (int i = 0; i < length; i += kCodeCacheEntrySize) {
Object* key = cache->get(i);
if (key->IsNull()) {
if (deleted_index < 0) deleted_index = i;
@@ -3053,14 +3113,15 @@ Object* Map::UpdateCodeCache(String* name, Code* code) {
}
if (key->IsUndefined()) {
if (deleted_index >= 0) i = deleted_index;
- cache->set(i + 0, name);
- cache->set(i + 1, code);
+ cache->set(i + kCodeCacheEntryNameOffset, name);
+ cache->set(i + kCodeCacheEntryCodeOffset, code);
return this;
}
if (name->Equals(String::cast(key))) {
- Code::Flags found = Code::cast(cache->get(i + 1))->flags();
+ Code::Flags found =
+ Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
if (Code::RemoveTypeFromFlags(found) == flags) {
- cache->set(i + 1, code);
+ cache->set(i + kCodeCacheEntryCodeOffset, code);
return this;
}
}
@@ -3069,61 +3130,207 @@ Object* Map::UpdateCodeCache(String* name, Code* code) {
// Reached the end of the code cache. If there were deleted
// elements, reuse the space for the first of them.
if (deleted_index >= 0) {
- cache->set(deleted_index + 0, name);
- cache->set(deleted_index + 1, code);
+ cache->set(deleted_index + kCodeCacheEntryNameOffset, name);
+ cache->set(deleted_index + kCodeCacheEntryCodeOffset, code);
return this;
}
- // Extend the code cache with some new entries (at least one).
- int new_length = length + ((length >> 1) & ~1) + 2;
- ASSERT((new_length & 1) == 0); // must be a multiple of two
+ // Extend the code cache with some new entries (at least one). Must be a
+ // multiple of the entry size.
+ int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
+ new_length = new_length - new_length % kCodeCacheEntrySize;
+ ASSERT((new_length % kCodeCacheEntrySize) == 0);
Object* result = cache->CopySize(new_length);
if (result->IsFailure()) return result;
// Add the (name, code) pair to the new cache.
cache = FixedArray::cast(result);
- cache->set(length + 0, name);
- cache->set(length + 1, code);
- set_code_cache(cache);
+ cache->set(length + kCodeCacheEntryNameOffset, name);
+ cache->set(length + kCodeCacheEntryCodeOffset, code);
+ set_default_cache(cache);
return this;
}
-Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
- FixedArray* cache = code_cache();
+Object* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
+ // Adding a new entry can cause a new cache to be allocated.
+ CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+ Object* new_cache = cache->Put(name, code);
+ if (new_cache->IsFailure()) return new_cache;
+ set_normal_type_cache(new_cache);
+ return this;
+}
+
+
+Object* CodeCache::Lookup(String* name, Code::Flags flags) {
+ if (Code::ExtractTypeFromFlags(flags) == NORMAL) {
+ return LookupNormalTypeCache(name, flags);
+ } else {
+ return LookupDefaultCache(name, flags);
+ }
+}
+
+
+Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
+ FixedArray* cache = default_cache();
int length = cache->length();
- for (int i = 0; i < length; i += 2) {
- Object* key = cache->get(i);
+ for (int i = 0; i < length; i += kCodeCacheEntrySize) {
+ Object* key = cache->get(i + kCodeCacheEntryNameOffset);
// Skip deleted elements.
if (key->IsNull()) continue;
if (key->IsUndefined()) return key;
if (name->Equals(String::cast(key))) {
- Code* code = Code::cast(cache->get(i + 1));
- if (code->flags() == flags) return code;
+ Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
+ if (code->flags() == flags) {
+ return code;
+ }
}
}
return Heap::undefined_value();
}
-int Map::IndexInCodeCache(Code* code) {
- FixedArray* array = code_cache();
+Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
+ if (!normal_type_cache()->IsUndefined()) {
+ CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+ return cache->Lookup(name, flags);
+ } else {
+ return Heap::undefined_value();
+ }
+}
+
+
+int CodeCache::GetIndex(String* name, Code* code) {
+ // This is not used for normal load/store/call IC's.
+ if (code->type() == NORMAL) {
+ if (normal_type_cache()->IsUndefined()) return -1;
+ CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+ return cache->GetIndex(name, code->flags());
+ }
+
+ FixedArray* array = default_cache();
int len = array->length();
- for (int i = 0; i < len; i += 2) {
- if (array->get(i + 1) == code) return i + 1;
+ for (int i = 0; i < len; i += kCodeCacheEntrySize) {
+ if (array->get(i + kCodeCacheEntryCodeOffset) == code) return i + 1;
}
return -1;
}
-void Map::RemoveFromCodeCache(int index) {
- FixedArray* array = code_cache();
- ASSERT(array->length() >= index && array->get(index)->IsCode());
- // Use null instead of undefined for deleted elements to distinguish
- // deleted elements from unused elements. This distinction is used
- // when looking up in the cache and when updating the cache.
- array->set_null(index - 1); // key
- array->set_null(index); // code
+void CodeCache::RemoveByIndex(String* name, Code* code, int index) {
+ if (code->type() == NORMAL) {
+ ASSERT(!normal_type_cache()->IsUndefined());
+ CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+ ASSERT(cache->GetIndex(name, code->flags()) == index);
+ cache->RemoveByIndex(index);
+ } else {
+ FixedArray* array = default_cache();
+ ASSERT(array->length() >= index && array->get(index)->IsCode());
+ // Use null instead of undefined for deleted elements to distinguish
+ // deleted elements from unused elements. This distinction is used
+ // when looking up in the cache and when updating the cache.
+ ASSERT_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
+ array->set_null(index - 1); // Name.
+ array->set_null(index); // Code.
+ }
+}
+
+
+// The key in the code cache hash table consists of the property name and the
+// code object. The actual match is on the name and the code flags. If a key
+// is created using the flags and not a code object it can only be used for
+// lookup not to create a new entry.
+class CodeCacheHashTableKey : public HashTableKey {
+ public:
+ CodeCacheHashTableKey(String* name, Code::Flags flags)
+ : name_(name), flags_(flags), code_(NULL) { }
+
+ CodeCacheHashTableKey(String* name, Code* code)
+ : name_(name),
+ flags_(code->flags()),
+ code_(code) { }
+
+
+ bool IsMatch(Object* other) {
+ if (!other->IsFixedArray()) return false;
+ FixedArray* pair = FixedArray::cast(other);
+ String* name = String::cast(pair->get(0));
+ Code::Flags flags = Code::cast(pair->get(1))->flags();
+ if (flags != flags_) {
+ return false;
+ }
+ return name_->Equals(name);
+ }
+
+ static uint32_t NameFlagsHashHelper(String* name, Code::Flags flags) {
+ return name->Hash() ^ flags;
+ }
+
+ uint32_t Hash() { return NameFlagsHashHelper(name_, flags_); }
+
+ uint32_t HashForObject(Object* obj) {
+ FixedArray* pair = FixedArray::cast(obj);
+ String* name = String::cast(pair->get(0));
+ Code* code = Code::cast(pair->get(1));
+ return NameFlagsHashHelper(name, code->flags());
+ }
+
+ Object* AsObject() {
+ ASSERT(code_ != NULL);
+ Object* obj = Heap::AllocateFixedArray(2);
+ if (obj->IsFailure()) return obj;
+ FixedArray* pair = FixedArray::cast(obj);
+ pair->set(0, name_);
+ pair->set(1, code_);
+ return pair;
+ }
+
+ private:
+ String* name_;
+ Code::Flags flags_;
+ Code* code_;
+};
+
+
+Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
+ CodeCacheHashTableKey key(name, flags);
+ int entry = FindEntry(&key);
+ if (entry == kNotFound) return Heap::undefined_value();
+ return get(EntryToIndex(entry) + 1);
+}
+
+
+Object* CodeCacheHashTable::Put(String* name, Code* code) {
+ CodeCacheHashTableKey key(name, code);
+ Object* obj = EnsureCapacity(1, &key);
+ if (obj->IsFailure()) return obj;
+
+ // Don't use this, as the table might have grown.
+ CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
+
+ int entry = cache->FindInsertionEntry(key.Hash());
+ Object* k = key.AsObject();
+ if (k->IsFailure()) return k;
+
+ cache->set(EntryToIndex(entry), k);
+ cache->set(EntryToIndex(entry) + 1, code);
+ cache->ElementAdded();
+ return cache;
+}
+
+
+int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
+ CodeCacheHashTableKey key(name, flags);
+ int entry = FindEntry(&key);
+ return (entry == kNotFound) ? -1 : entry;
+}
+
+
+void CodeCacheHashTable::RemoveByIndex(int index) {
+ ASSERT(index >= 0);
+ set(EntryToIndex(index), Heap::null_value());
+ set(EntryToIndex(index) + 1, Heap::null_value());
+ ElementRemoved();
}
@@ -3430,18 +3637,25 @@ void DescriptorArray::Sort() {
int len = number_of_descriptors();
// Bottom-up max-heap construction.
- for (int i = 1; i < len; ++i) {
- int child_index = i;
- while (child_index > 0) {
- int parent_index = ((child_index + 1) >> 1) - 1;
- uint32_t parent_hash = GetKey(parent_index)->Hash();
+ // Index of the last node with children
+ const int max_parent_index = (len / 2) - 1;
+ for (int i = max_parent_index; i >= 0; --i) {
+ int parent_index = i;
+ const uint32_t parent_hash = GetKey(i)->Hash();
+ while (parent_index <= max_parent_index) {
+ int child_index = 2 * parent_index + 1;
uint32_t child_hash = GetKey(child_index)->Hash();
- if (parent_hash < child_hash) {
- Swap(parent_index, child_index);
- } else {
- break;
+ if (child_index + 1 < len) {
+ uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+ if (right_child_hash > child_hash) {
+ child_index++;
+ child_hash = right_child_hash;
+ }
}
- child_index = parent_index;
+ if (child_hash <= parent_hash) break;
+ Swap(parent_index, child_index);
+ // Now element at child_index could be < its children.
+ parent_index = child_index; // parent_hash remains correct.
}
}
@@ -3451,21 +3665,21 @@ void DescriptorArray::Sort() {
Swap(0, i);
// Sift down the new top element.
int parent_index = 0;
- while (true) {
- int child_index = ((parent_index + 1) << 1) - 1;
- if (child_index >= i) break;
- uint32_t child1_hash = GetKey(child_index)->Hash();
- uint32_t child2_hash = GetKey(child_index + 1)->Hash();
- uint32_t parent_hash = GetKey(parent_index)->Hash();
- if (child_index + 1 >= i || child1_hash > child2_hash) {
- if (parent_hash > child1_hash) break;
- Swap(parent_index, child_index);
- parent_index = child_index;
- } else {
- if (parent_hash > child2_hash) break;
- Swap(parent_index, child_index + 1);
- parent_index = child_index + 1;
+ const uint32_t parent_hash = GetKey(parent_index)->Hash();
+ const int max_parent_index = (i / 2) - 1;
+ while (parent_index <= max_parent_index) {
+ int child_index = parent_index * 2 + 1;
+ uint32_t child_hash = GetKey(child_index)->Hash();
+ if (child_index + 1 < i) {
+ uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+ if (right_child_hash > child_hash) {
+ child_index++;
+ child_hash = right_child_hash;
+ }
}
+ if (child_hash <= parent_hash) break;
+ Swap(parent_index, child_index);
+ parent_index = child_index;
}
}
@@ -3546,7 +3760,7 @@ int String::Utf8Length() {
// doesn't make Utf8Length faster, but it is very likely that
// the string will be accessed later (for example by WriteUtf8)
// so it's still a good idea.
- TryFlattenIfNotFlat();
+ TryFlatten();
Access<StringInputBuffer> buffer(&string_input_buffer);
buffer->Reset(0, this);
int result = 0;
@@ -4637,9 +4851,9 @@ uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
}
-Object* String::SubString(int start, int end) {
+Object* String::SubString(int start, int end, PretenureFlag pretenure) {
if (start == 0 && end == length()) return this;
- Object* result = Heap::AllocateSubString(this, start, end);
+ Object* result = Heap::AllocateSubString(this, start, end, pretenure);
return result;
}
@@ -4951,11 +5165,9 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
- IteratePointers(v, kNameOffset, kConstructStubOffset + kPointerSize);
- IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
- IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize);
- IteratePointers(v, kThisPropertyAssignmentsOffset,
- kThisPropertyAssignmentsOffset + kPointerSize);
+ IteratePointers(v,
+ kNameOffset,
+ kThisPropertyAssignmentsOffset + kPointerSize);
}
@@ -5126,6 +5338,7 @@ const char* Code::Kind2String(Kind kind) {
case STORE_IC: return "STORE_IC";
case KEYED_STORE_IC: return "KEYED_STORE_IC";
case CALL_IC: return "CALL_IC";
+ case BINARY_OP_IC: return "BINARY_OP_IC";
}
UNREACHABLE();
return NULL;
@@ -5247,7 +5460,7 @@ Object* JSObject::SetSlowElements(Object* len) {
case DICTIONARY_ELEMENTS: {
if (IsJSArray()) {
uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+ static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
element_dictionary()->RemoveNumberEntries(new_length, old_length),
JSArray::cast(this)->set_length(len);
}
@@ -6903,15 +7116,17 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
-Object* HashTable<Shape, Key>::Allocate(int at_least_space_for) {
- int capacity = RoundUpToPowerOf2(at_least_space_for);
- if (capacity < 4) {
- capacity = 4; // Guarantee min capacity.
+Object* HashTable<Shape, Key>::Allocate(int at_least_space_for,
+ PretenureFlag pretenure) {
+ const int kMinCapacity = 32;
+ int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+ if (capacity < kMinCapacity) {
+ capacity = kMinCapacity; // Guarantee min capacity.
} else if (capacity > HashTable::kMaxCapacity) {
return Failure::OutOfMemoryException();
}
- Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
+ Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity), pretenure);
if (!obj->IsFailure()) {
HashTable::cast(obj)->SetNumberOfElements(0);
HashTable::cast(obj)->SetNumberOfDeletedElements(0);
@@ -6946,10 +7161,15 @@ Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
// Return if:
// 50% is still free after adding n elements and
// at most 50% of the free elements are deleted elements.
- if ((nof + (nof >> 1) <= capacity) &&
- (nod <= (capacity - nof) >> 1)) return this;
+ if (nod <= (capacity - nof) >> 1) {
+ int needed_free = nof >> 1;
+ if (nof + needed_free <= capacity) return this;
+ }
- Object* obj = Allocate(nof * 2);
+ const int kMinCapacityForPretenure = 256;
+ bool pretenure =
+ (capacity > kMinCapacityForPretenure) && !Heap::InNewSpace(this);
+ Object* obj = Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
if (obj->IsFailure()) return obj;
AssertNoAllocation no_gc;
@@ -6981,7 +7201,6 @@ Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
}
-
template<typename Shape, typename Key>
uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
@@ -7091,8 +7310,7 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
result_double = HeapNumber::cast(new_double);
}
- int capacity = dict->Capacity();
- Object* obj = NumberDictionary::Allocate(dict->Capacity());
+ Object* obj = NumberDictionary::Allocate(dict->NumberOfElements());
if (obj->IsFailure()) return obj;
NumberDictionary* new_dict = NumberDictionary::cast(obj);
@@ -7100,6 +7318,7 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
uint32_t pos = 0;
uint32_t undefs = 0;
+ int capacity = dict->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dict->KeyAt(i);
if (dict->IsKey(k)) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 75338920c1..826fcae44a 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -72,6 +72,7 @@
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
+// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
@@ -102,6 +103,7 @@
// - TypeSwitchInfo
// - DebugInfo
// - BreakPointInfo
+// - CodeCache
//
// Formats of Object*:
// Smi: [31 bit signed int] 0
@@ -269,6 +271,7 @@ enum PropertyNormalizationMode {
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
V(SCRIPT_TYPE) \
+ V(CODE_CACHE_TYPE) \
\
V(JS_VALUE_TYPE) \
V(JS_OBJECT_TYPE) \
@@ -364,7 +367,8 @@ enum PropertyNormalizationMode {
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
- V(SCRIPT, Script, script)
+ V(SCRIPT, Script, script) \
+ V(CODE_CACHE, CodeCache, code_cache)
#ifdef ENABLE_DEBUGGER_SUPPORT
#define STRUCT_LIST_DEBUGGER(V) \
@@ -468,6 +472,7 @@ enum InstanceType {
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
SCRIPT_TYPE,
+ CODE_CACHE_TYPE,
#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
@@ -601,6 +606,7 @@ class Object BASE_EMBEDDED {
inline bool IsDictionary();
inline bool IsSymbolTable();
inline bool IsCompilationCacheTable();
+ inline bool IsCodeCacheHashTable();
inline bool IsMapCache();
inline bool IsPrimitive();
inline bool IsGlobalObject();
@@ -1626,6 +1632,9 @@ class FixedArray: public Array {
inline void set_null(int index);
inline void set_the_hole(int index);
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
// Copy operations.
inline Object* Copy();
Object* CopySize(int new_length);
@@ -1933,7 +1942,8 @@ class HashTable: public FixedArray {
}
// Returns a new HashTable object. Might return Failure.
- static Object* Allocate(int at_least_space_for);
+ static Object* Allocate(int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED);
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
@@ -1965,6 +1975,8 @@ class HashTable: public FixedArray {
static const int kEntrySize = Shape::kEntrySize;
static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
+ static const int kCapacityOffset =
+ kHeaderSize + kCapacityIndex * kPointerSize;
// Constant used for denoting a absent entry.
static const int kNotFound = -1;
@@ -2615,13 +2627,14 @@ class Code: public HeapObject {
CALL_IC,
STORE_IC,
KEYED_STORE_IC,
- // No more than eight kinds. The value currently encoded in three bits in
+ BINARY_OP_IC,
+ // No more than 16 kinds. The value currently encoded in four bits in
// Flags.
// Pseudo-kinds.
REGEXP = BUILTIN,
FIRST_IC_KIND = LOAD_IC,
- LAST_IC_KIND = KEYED_STORE_IC
+ LAST_IC_KIND = BINARY_OP_IC
};
enum {
@@ -2667,7 +2680,7 @@ class Code: public HeapObject {
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
- // [major_key]: For kind STUB, the major key.
+ // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline CodeStub::Major major_key();
inline void set_major_key(CodeStub::Major major);
@@ -2774,14 +2787,14 @@ class Code: public HeapObject {
static const int kFlagsICStateShift = 0;
static const int kFlagsICInLoopShift = 3;
static const int kFlagsKindShift = 4;
- static const int kFlagsTypeShift = 7;
- static const int kFlagsArgumentsCountShift = 10;
+ static const int kFlagsTypeShift = 8;
+ static const int kFlagsArgumentsCountShift = 11;
- static const int kFlagsICStateMask = 0x00000007; // 0000000111
- static const int kFlagsICInLoopMask = 0x00000008; // 0000001000
- static const int kFlagsKindMask = 0x00000070; // 0001110000
- static const int kFlagsTypeMask = 0x00000380; // 1110000000
- static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
+ static const int kFlagsICStateMask = 0x00000007; // 00000000111
+ static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
+ static const int kFlagsKindMask = 0x000000F0; // 00011110000
+ static const int kFlagsTypeMask = 0x00000700; // 11100000000
+ static const int kFlagsArgumentsCountMask = 0xFFFFF800;
static const int kFlagsNotUsedInLookup =
(kFlagsICInLoopMask | kFlagsTypeMask);
@@ -2922,7 +2935,7 @@ class Map: public HeapObject {
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
// [stub cache]: contains stubs compiled for this map.
- DECL_ACCESSORS(code_cache, FixedArray)
+ DECL_ACCESSORS(code_cache, Object)
Object* CopyDropDescriptors();
@@ -2958,10 +2971,10 @@ class Map: public HeapObject {
// Returns the non-negative index of the code object if it is in the
// cache and -1 otherwise.
- int IndexInCodeCache(Code* code);
+ int IndexInCodeCache(String* name, Code* code);
// Removes a code object from the code cache at the given index.
- void RemoveFromCodeCache(int index);
+ void RemoveFromCodeCache(String* name, Code* code, int index);
// For every transition in this map, makes the transition's
// target's prototype pointer point back to this map.
@@ -3026,6 +3039,11 @@ class Map: public HeapObject {
static const int kNeedsLoading = 0;
static const int kIsExtensible = 1;
+ // Layout of the default cache. It holds alternating name and code objects.
+ static const int kCodeCacheEntrySize = 2;
+ static const int kCodeCacheEntryNameOffset = 0;
+ static const int kCodeCacheEntryCodeOffset = 1;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -3275,11 +3293,22 @@ class SharedFunctionInfo: public HeapObject {
static const int kDontAdaptArgumentsSentinel = -1;
// Layout description.
- // (An even number of integers has a size that is a multiple of a pointer.)
+ // Pointer fields.
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
static const int kConstructStubOffset = kCodeOffset + kPointerSize;
- static const int kLengthOffset = kConstructStubOffset + kPointerSize;
+ static const int kInstanceClassNameOffset =
+ kConstructStubOffset + kPointerSize;
+ static const int kExternalReferenceDataOffset =
+ kInstanceClassNameOffset + kPointerSize;
+ static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
+ static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
+ static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
+ static const int kThisPropertyAssignmentsOffset =
+ kInferredNameOffset + kPointerSize;
+ // Integer fields.
+ static const int kLengthOffset =
+ kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
@@ -3287,27 +3316,14 @@ class SharedFunctionInfo: public HeapObject {
kExpectedNofPropertiesOffset + kIntSize;
static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
- static const int kInstanceClassNameOffset =
+ static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
- static const int kExternalReferenceDataOffset =
- kInstanceClassNameOffset + kPointerSize;
- static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
- static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
- static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
- static const int kCompilerHintsOffset = kInferredNameOffset + kPointerSize;
- static const int kThisPropertyAssignmentsOffset =
- kCompilerHintsOffset + kPointerSize;
static const int kThisPropertyAssignmentsCountOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
- static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+ kCompilerHintsOffset + kIntSize;
+ // Total size.
+ static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
private:
- // Bit positions in length_and_flg.
- // The least significant bit is used as the flag.
- static const int kFlagBit = 0;
- static const int kLengthShift = 1;
- static const int kLengthMask = ~((1 << kLengthShift) - 1);
-
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
@@ -3711,6 +3727,97 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
};
+class CodeCache: public Struct {
+ public:
+ DECL_ACCESSORS(default_cache, FixedArray)
+ DECL_ACCESSORS(normal_type_cache, Object)
+
+ // Add the code object to the cache.
+ Object* Update(String* name, Code* code);
+
+ // Lookup code object in the cache. Returns code object if found and undefined
+ // if not.
+ Object* Lookup(String* name, Code::Flags flags);
+
+ // Get the internal index of a code object in the cache. Returns -1 if the
+ // code object is not in that cache. This index can be used to later call
+ // RemoveByIndex. The cache cannot be modified between a call to GetIndex and
+ // RemoveByIndex.
+ int GetIndex(String* name, Code* code);
+
+ // Remove an object from the cache with the provided internal index.
+ void RemoveByIndex(String* name, Code* code, int index);
+
+ static inline CodeCache* cast(Object* obj);
+
+#ifdef DEBUG
+ void CodeCachePrint();
+ void CodeCacheVerify();
+#endif
+
+ static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
+ static const int kNormalTypeCacheOffset =
+ kDefaultCacheOffset + kPointerSize;
+ static const int kSize = kNormalTypeCacheOffset + kPointerSize;
+
+ private:
+ Object* UpdateDefaultCache(String* name, Code* code);
+ Object* UpdateNormalTypeCache(String* name, Code* code);
+ Object* LookupDefaultCache(String* name, Code::Flags flags);
+ Object* LookupNormalTypeCache(String* name, Code::Flags flags);
+
+ // Code cache layout of the default cache. Elements are alternating name and
+ // code objects for non normal load/store/call IC's.
+ static const int kCodeCacheEntrySize = 2;
+ static const int kCodeCacheEntryNameOffset = 0;
+ static const int kCodeCacheEntryCodeOffset = 1;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCache);
+};
+
+
+class CodeCacheHashTableShape {
+ public:
+ static inline bool IsMatch(HashTableKey* key, Object* value) {
+ return key->IsMatch(value);
+ }
+
+ static inline uint32_t Hash(HashTableKey* key) {
+ return key->Hash();
+ }
+
+ static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
+ return key->HashForObject(object);
+ }
+
+ static Object* AsObject(HashTableKey* key) {
+ return key->AsObject();
+ }
+
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
+};
+
+
+class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
+ HashTableKey*> {
+ public:
+ Object* Lookup(String* name, Code::Flags flags);
+ Object* Put(String* name, Code* code);
+
+ int GetIndex(String* name, Code::Flags flags);
+ void RemoveByIndex(int index);
+
+ static inline CodeCacheHashTable* cast(Object* obj);
+
+ // Initial size of the fixed array backing the hash table.
+ static const int kInitialSize = 64;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
+};
+
+
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
@@ -3837,13 +3944,13 @@ class String: public HeapObject {
// Try to flatten the top level ConsString that is hiding behind this
// string. This is a no-op unless the string is a ConsString. Flatten
// mutates the ConsString and might return a failure.
- Object* TryFlatten();
+ Object* SlowTryFlatten(PretenureFlag pretenure);
// Try to flatten the string. Checks first inline to see if it is necessary.
- // Do not handle allocation failures. After calling TryFlattenIfNotFlat, the
+ // Do not handle allocation failures. After calling TryFlatten, the
// string could still be a ConsString, in which case a failure is returned.
// Use FlattenString from Handles.cc to be sure to flatten.
- inline Object* TryFlattenIfNotFlat();
+ inline Object* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
Vector<const char> ToAsciiVector();
Vector<const uc16> ToUC16Vector();
@@ -3853,7 +3960,7 @@ class String: public HeapObject {
bool MarkAsUndetectable();
// Return a substring.
- Object* SubString(int from, int to);
+ Object* SubString(int from, int to, PretenureFlag pretenure = NOT_TENURED);
// String equality operations.
inline bool Equals(String* other);
@@ -4051,10 +4158,6 @@ class SeqString: public String {
// Casting.
static inline SeqString* cast(Object* obj);
- // Dispatched behaviour.
- // For regexp code.
- uint16_t* SeqStringGetTwoByteAddress();
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 5058296db2..dd7266907d 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -31,6 +31,7 @@
#include "ast.h"
#include "bootstrapper.h"
#include "compiler.h"
+#include "messages.h"
#include "platform.h"
#include "runtime.h"
#include "parser.h"
@@ -107,13 +108,13 @@ class Parser {
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Handle<String> source,
- unibrow::CharacterStream* stream,
bool in_global_context);
FunctionLiteral* ParseLazy(Handle<String> source,
Handle<String> name,
- int start_position, bool is_expression);
- FunctionLiteral* ParseJson(Handle<String> source,
- unibrow::CharacterStream* stream);
+ int start_position,
+ int end_position,
+ bool is_expression);
+ FunctionLiteral* ParseJson(Handle<String> source);
// The minimum number of contiguous assignment that will
// be treated as an initialization block. Benchmarks show that
@@ -1212,7 +1213,7 @@ bool Parser::PreParseProgram(Handle<String> source,
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
- scanner_.Init(source, stream, 0, JAVASCRIPT);
+ scanner_.Initialize(source, stream, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
DummyScope top_scope;
@@ -1226,7 +1227,6 @@ bool Parser::PreParseProgram(Handle<String> source,
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
- unibrow::CharacterStream* stream,
bool in_global_context) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
@@ -1234,8 +1234,8 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
Counters::total_parse_size.Increment(source->length());
// Initialize parser state.
- source->TryFlattenIfNotFlat();
- scanner_.Init(source, stream, 0, JAVASCRIPT);
+ source->TryFlatten();
+ scanner_.Initialize(source, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
// Compute the parsing mode.
@@ -1286,15 +1286,15 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
FunctionLiteral* Parser::ParseLazy(Handle<String> source,
Handle<String> name,
int start_position,
+ int end_position,
bool is_expression) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse_lazy);
- source->TryFlattenIfNotFlat();
Counters::total_parse_size.Increment(source->length());
- SafeStringInputBuffer buffer(source.location());
// Initialize parser state.
- scanner_.Init(source, &buffer, start_position, JAVASCRIPT);
+ source->TryFlatten();
+ scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
@@ -1330,16 +1330,15 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
return result;
}
-FunctionLiteral* Parser::ParseJson(Handle<String> source,
- unibrow::CharacterStream* stream) {
+FunctionLiteral* Parser::ParseJson(Handle<String> source) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse);
Counters::total_parse_size.Increment(source->length());
// Initialize parser state.
- source->TryFlattenIfNotFlat();
- scanner_.Init(source, stream, 0, JSON);
+ source->TryFlatten(TENURED);
+ scanner_.Initialize(source, JSON);
ASSERT(target_stack_ == NULL);
FunctionLiteral* result = NULL;
@@ -3258,7 +3257,6 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
result = VariableProxySentinel::this_proxy();
} else {
VariableProxy* recv = top_scope_->receiver();
- recv->var_uses()->RecordRead(1);
result = recv;
}
break;
@@ -5065,13 +5063,12 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
return NULL;
}
Handle<String> source = Handle<String>(String::cast(script->source()));
- SafeStringInputBuffer input(source.location());
FunctionLiteral* result;
if (is_json) {
ASSERT(compile_in_global_context);
- result = parser.ParseJson(source, &input);
+ result = parser.ParseJson(source);
} else {
- result = parser.ParseProgram(source, &input, compile_in_global_context);
+ result = parser.ParseProgram(source, compile_in_global_context);
}
return result;
}
@@ -5086,13 +5083,11 @@ FunctionLiteral* MakeLazyAST(Handle<Script> script,
always_allow_natives_syntax = true;
AstBuildingParser parser(script, true, NULL, NULL); // always allow
always_allow_natives_syntax = allow_natives_syntax_before;
- // Parse the function by pulling the function source from the script source.
+ // Parse the function by pointing to the function source in the script source.
Handle<String> script_source(String::cast(script->source()));
FunctionLiteral* result =
- parser.ParseLazy(SubString(script_source, start_position, end_position),
- name,
- start_position,
- is_expression);
+ parser.ParseLazy(script_source, name,
+ start_position, end_position, is_expression);
return result;
}
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index ff75776856..3617e8af2b 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -567,6 +567,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
TickSample sample;
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
@@ -588,9 +591,6 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
active_sampler_->SampleStack(&sample);
}
- // We always sample the VM state.
- sample.state = Logger::state();
-
active_sampler_->Tick(&sample);
}
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index e890f94aad..f1812ff205 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -331,8 +331,8 @@ void OS::LogSharedLibraryAddresses() {
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
int c;
- if (attr_r == 'r' && attr_x == 'x') {
- // Found a readable and executable entry. Skip characters until we reach
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
// the beginning of the filename or the end of the line.
do {
c = getc(fp);
@@ -728,6 +728,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
TickSample sample;
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
@@ -760,9 +763,6 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
active_sampler_->SampleStack(&sample);
}
- // We always sample the VM state.
- sample.state = Logger::state();
-
active_sampler_->Tick(&sample);
#endif
}
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index e379ae2267..45029879f4 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -548,6 +548,9 @@ class Sampler::PlatformData : public Malloced {
while (sampler_->IsActive()) {
TickSample sample;
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
&& KERN_SUCCESS == thread_suspend(profiled_thread_)) {
@@ -585,8 +588,6 @@ class Sampler::PlatformData : public Malloced {
thread_resume(profiled_thread_);
}
- // We always sample the VM state.
- sample.state = Logger::state();
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 81b0d4c12e..04ffea968c 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -1807,6 +1807,9 @@ class Sampler::PlatformData : public Malloced {
while (sampler_->IsActive()) {
TickSample sample;
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
&& SuspendThread(profiled_thread_) != (DWORD)-1) {
@@ -1826,8 +1829,6 @@ class Sampler::PlatformData : public Malloced {
ResumeThread(profiled_thread_);
}
- // We always sample the VM state.
- sample.state = Logger::state();
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index ca570a6487..6e2a60ec8d 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -604,7 +604,7 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->Print(StaticType::Type2String(expr->type()));
printed_first = true;
}
- if (expr->num() != Expression::kNoLabel) {
+ if (expr->num() != AstNode::kNoNumber) {
ast_printer_->Print(printed_first ? ", num = " : " (num = ");
ast_printer_->Print("%d", expr->num());
printed_first = true;
@@ -679,7 +679,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
pos += OS::SNPrintF(buf + pos, ", type = %s",
StaticType::Type2String(type));
}
- if (num != Expression::kNoLabel) {
+ if (num != AstNode::kNoNumber) {
pos += OS::SNPrintF(buf + pos, ", num = %d", num);
}
OS::SNPrintF(buf + pos, ")");
@@ -740,7 +740,7 @@ void AstPrinter::PrintParameters(Scope* scope) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
scope->parameter(i)->name(),
scope->parameter(i)->type(),
- Expression::kNoLabel);
+ AstNode::kNoNumber);
}
}
}
@@ -786,7 +786,7 @@ void AstPrinter::VisitDeclaration(Declaration* node) {
node->proxy()->AsVariable(),
node->proxy()->name(),
node->proxy()->AsVariable()->type(),
- Expression::kNoLabel);
+ AstNode::kNoNumber);
} else {
// function declarations
PrintIndented("FUNCTION ");
diff --git a/deps/v8/src/regexp-delay.js b/deps/v8/src/regexp-delay.js
index 7bec455d37..843d0aa0eb 100644
--- a/deps/v8/src/regexp-delay.js
+++ b/deps/v8/src/regexp-delay.js
@@ -142,7 +142,7 @@ function DoRegExpExec(regexp, string, index) {
function RegExpExec(string) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError('method_called_on_incompatible',
+ throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.exec', this]);
}
if (%_ArgumentsLength() == 0) {
@@ -152,8 +152,12 @@ function RegExpExec(string) {
}
string = regExpInput;
}
- var s = ToString(string);
- var length = s.length;
+ var s;
+ if (IS_STRING(string)) {
+ s = string;
+ } else {
+ s = ToString(string);
+ }
var lastIndex = this.lastIndex;
var i = this.global ? TO_INTEGER(lastIndex) : 0;
@@ -172,16 +176,23 @@ function RegExpExec(string) {
}
var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
- var result = new $Array(numResults);
- for (var i = 0; i < numResults; i++) {
- var matchStart = lastMatchInfo[CAPTURE(i << 1)];
- var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
- if (matchStart != -1 && matchEnd != -1) {
- result[i] = SubString(s, matchStart, matchEnd);
- } else {
- // Make sure the element is present. Avoid reading the undefined
- // property from the global object since this may change.
- result[i] = void 0;
+ var result;
+ if (numResults === 1) {
+ var matchStart = lastMatchInfo[CAPTURE(0)];
+ var matchEnd = lastMatchInfo[CAPTURE(1)];
+ result = [SubString(s, matchStart, matchEnd)];
+ } else {
+ result = new $Array(numResults);
+ for (var i = 0; i < numResults; i++) {
+ var matchStart = lastMatchInfo[CAPTURE(i << 1)];
+ var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
+ if (matchStart != -1 && matchEnd != -1) {
+ result[i] = SubString(s, matchStart, matchEnd);
+ } else {
+ // Make sure the element is present. Avoid reading the undefined
+ // property from the global object since this may change.
+ result[i] = void 0;
+ }
}
}
@@ -199,7 +210,7 @@ function RegExpExec(string) {
// else implements.
function RegExpTest(string) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError('method_called_on_incompatible',
+ throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.test', this]);
}
if (%_ArgumentsLength() == 0) {
diff --git a/deps/v8/src/register-allocator-inl.h b/deps/v8/src/register-allocator-inl.h
index 718da36012..8453104ee2 100644
--- a/deps/v8/src/register-allocator-inl.h
+++ b/deps/v8/src/register-allocator-inl.h
@@ -103,6 +103,45 @@ void RegisterAllocator::Unuse(Register reg) {
registers_.Unuse(ToNumber(reg));
}
+
+NumberInfo Result::number_info() const {
+ ASSERT(is_valid());
+ if (!is_constant()) {
+ return NumberInfo::FromInt(NumberInfoField::decode(value_));
+ }
+ Handle<Object> value = handle();
+ if (value->IsSmi()) return NumberInfo::Smi();
+ if (value->IsHeapNumber()) return NumberInfo::HeapNumber();
+ return NumberInfo::Unknown();
+}
+
+
+void Result::set_number_info(NumberInfo info) {
+ ASSERT(is_valid());
+ value_ &= ~NumberInfoField::mask();
+ value_ |= NumberInfoField::encode(info.ToInt());
+}
+
+
+bool Result::is_number() const {
+ return number_info().IsNumber();
+}
+
+
+bool Result::is_smi() const {
+ return number_info().IsSmi();
+}
+
+
+bool Result::is_integer32() const {
+ return number_info().IsInteger32();
+}
+
+
+bool Result::is_heap_number() const {
+ return number_info().IsHeapNumber();
+}
+
} } // namespace v8::internal
#endif // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc
index 1655b4ac12..64e4428792 100644
--- a/deps/v8/src/register-allocator.cc
+++ b/deps/v8/src/register-allocator.cc
@@ -38,11 +38,11 @@ namespace internal {
// Result implementation.
-Result::Result(Register reg, NumberInfo::Type info) {
+Result::Result(Register reg, NumberInfo info) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
value_ = TypeField::encode(REGISTER)
- | NumberInfoField::encode(info)
+ | NumberInfoField::encode(info.ToInt())
| DataField::encode(reg.code_);
}
@@ -53,23 +53,6 @@ Result::ZoneObjectList* Result::ConstantList() {
}
-NumberInfo::Type Result::number_info() {
- ASSERT(is_valid());
- if (!is_constant()) return NumberInfoField::decode(value_);
- Handle<Object> value = handle();
- if (value->IsSmi()) return NumberInfo::kSmi;
- if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
- return NumberInfo::kUnknown;
-}
-
-
-void Result::set_number_info(NumberInfo::Type info) {
- ASSERT(is_valid());
- value_ = value_ & ~NumberInfoField::mask();
- value_ = value_ | NumberInfoField::encode(info);
-}
-
-
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h
index 747200a056..2a7b820a53 100644
--- a/deps/v8/src/register-allocator.h
+++ b/deps/v8/src/register-allocator.h
@@ -65,12 +65,12 @@ class Result BASE_EMBEDDED {
Result() { invalidate(); }
// Construct a register Result.
- explicit Result(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
+ explicit Result(Register reg, NumberInfo info = NumberInfo::Unknown());
// Construct a Result whose value is a compile-time constant.
explicit Result(Handle<Object> value) {
value_ = TypeField::encode(CONSTANT)
- | NumberInfoField::encode(NumberInfo::kUninitialized)
+ | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@@ -101,13 +101,12 @@ class Result BASE_EMBEDDED {
void invalidate() { value_ = TypeField::encode(INVALID); }
- NumberInfo::Type number_info();
- void set_number_info(NumberInfo::Type info);
- bool is_number() {
- return (number_info() & NumberInfo::kNumber) != 0;
- }
- bool is_smi() { return number_info() == NumberInfo::kSmi; }
- bool is_heap_number() { return number_info() == NumberInfo::kHeapNumber; }
+ inline NumberInfo number_info() const;
+ inline void set_number_info(NumberInfo info);
+ inline bool is_number() const;
+ inline bool is_smi() const;
+ inline bool is_integer32() const;
+ inline bool is_heap_number() const;
bool is_valid() const { return type() != INVALID; }
bool is_register() const { return type() == REGISTER; }
@@ -140,8 +139,8 @@ class Result BASE_EMBEDDED {
uint32_t value_;
class TypeField: public BitField<Type, 0, 2> {};
- class NumberInfoField : public BitField<NumberInfo::Type, 2, 3> {};
- class DataField: public BitField<uint32_t, 5, 32 - 5> {};
+ class NumberInfoField : public BitField<int, 2, 4> {};
+ class DataField: public BitField<uint32_t, 6, 32 - 6> {};
inline void CopyTo(Result* destination) const;
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index b05cfae309..8a3221280c 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -244,6 +244,12 @@ void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
!Heap::result_symbol()->Equals(*var->name())) {
func_name_inferrer_.PushName(var->name());
}
+
+ if (FLAG_safe_int32_compiler) {
+ if (var->IsStackAllocated() && !var->is_arguments()) {
+ node->set_side_effect_free(true);
+ }
+ }
}
}
@@ -252,11 +258,14 @@ void AstOptimizer::VisitLiteral(Literal* node) {
Handle<Object> literal = node->handle();
if (literal->IsSmi()) {
node->type()->SetAsLikelySmi();
+ node->set_side_effect_free(true);
} else if (literal->IsString()) {
Handle<String> lit_str(Handle<String>::cast(literal));
if (!Heap::prototype_symbol()->Equals(*lit_str)) {
func_name_inferrer_.PushName(lit_str);
}
+ } else if (literal->IsHeapNumber()) {
+ node->set_side_effect_free(true);
}
}
@@ -414,6 +423,27 @@ void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
Visit(node->expression());
+ if (FLAG_safe_int32_compiler) {
+ switch (node->op()) {
+ case Token::BIT_NOT:
+ node->expression()->set_to_int32(true);
+ // Fall through.
+ case Token::ADD:
+ case Token::SUB:
+ case Token::NOT:
+ node->set_side_effect_free(node->expression()->side_effect_free());
+ break;
+ case Token::DELETE:
+ case Token::TYPEOF:
+ case Token::VOID:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (node->op() == Token::BIT_NOT) {
+ node->expression()->set_to_int32(true);
+ }
}
@@ -442,6 +472,8 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
node->type()->SetAsLikelySmiIfUnknown();
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
+ node->left()->set_to_int32(true);
+ node->right()->set_to_int32(true);
break;
case Token::ADD:
case Token::SUB:
@@ -483,6 +515,32 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
}
}
}
+
+ if (FLAG_safe_int32_compiler) {
+ switch (node->op()) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ break;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ node->set_side_effect_free(node->left()->side_effect_free() &&
+ node->right()->side_effect_free());
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 6459aa78d1..6d3a158068 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -38,6 +38,7 @@
#include "debug.h"
#include "execution.h"
#include "jsregexp.h"
+#include "liveedit.h"
#include "parser.h"
#include "platform.h"
#include "runtime.h"
@@ -1230,6 +1231,17 @@ static Object* Runtime_RegExpExec(Arguments args) {
}
+static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSArray, prototype, 0);
+ // This is necessary to enable fast checks for absence of elements
+ // on Array.prototype and below.
+ prototype->set_elements(Heap::empty_fixed_array());
+ return Smi::FromInt(0);
+}
+
+
static Object* Runtime_MaterializeRegExpLiteral(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 4);
@@ -2608,8 +2620,8 @@ static Object* Runtime_StringLocaleCompare(Arguments args) {
int d = str1->Get(0) - str2->Get(0);
if (d != 0) return Smi::FromInt(d);
- str1->TryFlattenIfNotFlat();
- str2->TryFlattenIfNotFlat();
+ str1->TryFlatten();
+ str2->TryFlatten();
static StringInputBuffer buf1;
static StringInputBuffer buf2;
@@ -2818,7 +2830,7 @@ static Object* Runtime_NumberToPrecision(Arguments args) {
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
if (index < static_cast<uint32_t>(string->length())) {
- string->TryFlattenIfNotFlat();
+ string->TryFlatten();
return LookupSingleCharacterStringFromCode(
string->Get(index));
}
@@ -2846,6 +2858,11 @@ Object* Runtime::GetElementOrCharAt(Handle<Object> object, uint32_t index) {
return prototype->GetElement(index);
}
+ return GetElement(object, index);
+}
+
+
+Object* Runtime::GetElement(Handle<Object> object, uint32_t index) {
return object->GetElement(index);
}
@@ -3072,7 +3089,7 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
result = SetElement(js_object, index, value);
} else {
Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlattenIfNotFlat();
+ key_string->TryFlatten();
result = SetProperty(js_object, key_string, value, attr);
}
if (result.is_null()) return Failure::Exception();
@@ -3121,7 +3138,7 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
return js_object->SetElement(index, *value);
} else {
Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlattenIfNotFlat();
+ key_string->TryFlatten();
return js_object->IgnoreAttributesAndSetLocalProperty(*key_string,
*value,
attr);
@@ -3173,7 +3190,7 @@ Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
key_string = Handle<String>::cast(converted);
}
- key_string->TryFlattenIfNotFlat();
+ key_string->TryFlatten();
return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
}
@@ -3669,7 +3686,7 @@ static Object* Runtime_StringToNumber(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, subject, args[0]);
- subject->TryFlattenIfNotFlat();
+ subject->TryFlatten();
return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
}
@@ -3751,7 +3768,7 @@ static Object* Runtime_URIEscape(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, source, args[0]);
- source->TryFlattenIfNotFlat();
+ source->TryFlatten();
int escaped_length = 0;
int length = source->length();
@@ -3864,7 +3881,7 @@ static Object* Runtime_URIUnescape(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, source, args[0]);
- source->TryFlattenIfNotFlat();
+ source->TryFlatten();
bool ascii = true;
int length = source->length();
@@ -3904,7 +3921,7 @@ static Object* Runtime_StringParseInt(Arguments args) {
CONVERT_CHECKED(String, s, args[0]);
CONVERT_SMI_CHECKED(radix, args[1]);
- s->TryFlattenIfNotFlat();
+ s->TryFlatten();
int len = s->length();
int i;
@@ -4068,18 +4085,83 @@ static Object* ConvertCaseHelper(String* s,
}
-template <class Converter>
-static Object* ConvertCase(Arguments args,
- unibrow::Mapping<Converter, 128>* mapping) {
- NoHandleAllocation ha;
+static inline SeqAsciiString* TryGetSeqAsciiString(String* s) {
+ if (!s->IsFlat() || !s->IsAsciiRepresentation()) return NULL;
+ if (s->IsConsString()) {
+ ASSERT(ConsString::cast(s)->second()->length() == 0);
+ return SeqAsciiString::cast(ConsString::cast(s)->first());
+ }
+ return SeqAsciiString::cast(s);
+}
+
+
+namespace {
+
+struct ToLowerTraits {
+ typedef unibrow::ToLowercase UnibrowConverter;
+
+ static bool ConvertAscii(char* dst, char* src, int length) {
+ bool changed = false;
+ for (int i = 0; i < length; ++i) {
+ char c = src[i];
+ if ('A' <= c && c <= 'Z') {
+ c += ('a' - 'A');
+ changed = true;
+ }
+ dst[i] = c;
+ }
+ return changed;
+ }
+};
+
+
+struct ToUpperTraits {
+ typedef unibrow::ToUppercase UnibrowConverter;
+
+ static bool ConvertAscii(char* dst, char* src, int length) {
+ bool changed = false;
+ for (int i = 0; i < length; ++i) {
+ char c = src[i];
+ if ('a' <= c && c <= 'z') {
+ c -= ('a' - 'A');
+ changed = true;
+ }
+ dst[i] = c;
+ }
+ return changed;
+ }
+};
+
+} // namespace
+
+template <typename ConvertTraits>
+static Object* ConvertCase(
+ Arguments args,
+ unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
+ NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- s->TryFlattenIfNotFlat();
+ s->TryFlatten();
- int input_string_length = s->length();
+ const int length = s->length();
// Assume that the string is not empty; we need this assumption later
- if (input_string_length == 0) return s;
- int length = input_string_length;
+ if (length == 0) return s;
+
+ // Simpler handling of ascii strings.
+ //
+ // NOTE: This assumes that the upper/lower case of an ascii
+ // character is also ascii. This is currently the case, but it
+ // might break in the future if we implement more context and locale
+ // dependent upper/lower conversions.
+ SeqAsciiString* seq_ascii = TryGetSeqAsciiString(s);
+ if (seq_ascii != NULL) {
+ Object* o = Heap::AllocateRawAsciiString(length);
+ if (o->IsFailure()) return o;
+ SeqAsciiString* result = SeqAsciiString::cast(o);
+ bool has_changed_character = ConvertTraits::ConvertAscii(
+ result->GetChars(), seq_ascii->GetChars(), length);
+ return has_changed_character ? result : s;
+ }
Object* answer = ConvertCaseHelper(s, length, length, mapping);
if (answer->IsSmi()) {
@@ -4091,18 +4173,20 @@ static Object* ConvertCase(Arguments args,
static Object* Runtime_StringToLowerCase(Arguments args) {
- return ConvertCase<unibrow::ToLowercase>(args, &to_lower_mapping);
+ return ConvertCase<ToLowerTraits>(args, &to_lower_mapping);
}
static Object* Runtime_StringToUpperCase(Arguments args) {
- return ConvertCase<unibrow::ToUppercase>(args, &to_upper_mapping);
+ return ConvertCase<ToUpperTraits>(args, &to_upper_mapping);
}
+
static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
return unibrow::WhiteSpace::Is(c) || c == 0x200b;
}
+
static Object* Runtime_StringTrim(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
@@ -4111,7 +4195,7 @@ static Object* Runtime_StringTrim(Arguments args) {
CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
- s->TryFlattenIfNotFlat();
+ s->TryFlatten();
int length = s->length();
int left = 0;
@@ -4130,6 +4214,82 @@ static Object* Runtime_StringTrim(Arguments args) {
return s->SubString(left, right);
}
+
+// Copies ascii characters to the given fixed array looking up
+// one-char strings in the cache. Gives up on the first char that is
+// not in the cache and fills the remainder with smi zeros. Returns
+// the length of the successfully copied prefix.
+static int CopyCachedAsciiCharsToArray(const char* chars,
+ FixedArray* elements,
+ int length) {
+ AssertNoAllocation nogc;
+ FixedArray* ascii_cache = Heap::single_character_string_cache();
+ Object* undefined = Heap::undefined_value();
+ int i;
+ for (i = 0; i < length; ++i) {
+ Object* value = ascii_cache->get(chars[i]);
+ if (value == undefined) break;
+ ASSERT(!Heap::InNewSpace(value));
+ elements->set(i, value, SKIP_WRITE_BARRIER);
+ }
+ if (i < length) {
+ ASSERT(Smi::FromInt(0) == 0);
+ memset(elements->data_start() + i, 0, kPointerSize * (length - i));
+ }
+#ifdef DEBUG
+ for (int j = 0; j < length; ++j) {
+ Object* element = elements->get(j);
+ ASSERT(element == Smi::FromInt(0) ||
+ (element->IsString() && String::cast(element)->LooksValid()));
+ }
+#endif
+ return i;
+}
+
+
+// Converts a String to JSArray.
+// For example, "foo" => ["f", "o", "o"].
+static Object* Runtime_StringToArray(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(String, s, 0);
+
+ s->TryFlatten();
+ const int length = s->length();
+
+ Handle<FixedArray> elements;
+ if (s->IsFlat() && s->IsAsciiRepresentation()) {
+ Object* obj = Heap::AllocateUninitializedFixedArray(length);
+ if (obj->IsFailure()) return obj;
+ elements = Handle<FixedArray>(FixedArray::cast(obj));
+
+ Vector<const char> chars = s->ToAsciiVector();
+ // Note, this will initialize all elements (not only the prefix)
+ // to prevent GC from seeing partially initialized array.
+ int num_copied_from_cache = CopyCachedAsciiCharsToArray(chars.start(),
+ *elements,
+ length);
+
+ for (int i = num_copied_from_cache; i < length; ++i) {
+ elements->set(i, *LookupSingleCharacterStringFromCode(chars[i]));
+ }
+ } else {
+ elements = Factory::NewFixedArray(length);
+ for (int i = 0; i < length; ++i) {
+ elements->set(i, *LookupSingleCharacterStringFromCode(s->Get(i)));
+ }
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < length; ++i) {
+ ASSERT(String::cast(elements->get(i))->length() == 1);
+ }
+#endif
+
+ return *Factory::NewJSArrayWithElements(elements);
+}
+
+
bool Runtime::IsUpperCaseChar(uint16_t ch) {
unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
int char_length = to_upper_mapping.get(ch, 0, chars);
@@ -4152,9 +4312,12 @@ static Object* Runtime_NumberToInteger(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Object* obj = args[0];
- if (obj->IsSmi()) return obj;
- CONVERT_DOUBLE_CHECKED(number, obj);
+ CONVERT_DOUBLE_CHECKED(number, args[0]);
+
+ // We do not include 0 so that we don't have to treat +0 / -0 cases.
+ if (number > 0 && number <= Smi::kMaxValue) {
+ return Smi::FromInt(static_cast<int>(number));
+ }
return Heap::NumberFromDouble(DoubleToInteger(number));
}
@@ -4163,9 +4326,7 @@ static Object* Runtime_NumberToJSUint32(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Object* obj = args[0];
- if (obj->IsSmi() && Smi::cast(obj)->value() >= 0) return obj;
- CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, obj);
+ CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
return Heap::NumberFromUint32(number);
}
@@ -4174,9 +4335,12 @@ static Object* Runtime_NumberToJSInt32(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Object* obj = args[0];
- if (obj->IsSmi()) return obj;
- CONVERT_DOUBLE_CHECKED(number, obj);
+ CONVERT_DOUBLE_CHECKED(number, args[0]);
+
+ // We do not include 0 so that we don't have to treat +0 / -0 cases.
+ if (number > 0 && number <= Smi::kMaxValue) {
+ return Smi::FromInt(static_cast<int>(number));
+ }
return Heap::NumberFromInt32(DoubleToInt32(number));
}
@@ -4593,6 +4757,66 @@ static Object* Runtime_SmiLexicographicCompare(Arguments args) {
}
+static Object* StringInputBufferCompare(String* x, String* y) {
+ static StringInputBuffer bufx;
+ static StringInputBuffer bufy;
+ bufx.Reset(x);
+ bufy.Reset(y);
+ while (bufx.has_more() && bufy.has_more()) {
+ int d = bufx.GetNext() - bufy.GetNext();
+ if (d < 0) return Smi::FromInt(LESS);
+ else if (d > 0) return Smi::FromInt(GREATER);
+ }
+
+ // x is (non-trivial) prefix of y:
+ if (bufy.has_more()) return Smi::FromInt(LESS);
+ // y is prefix of x:
+ return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+}
+
+
+static Object* FlatStringCompare(String* x, String* y) {
+ ASSERT(x->IsFlat());
+ ASSERT(y->IsFlat());
+ Object* equal_prefix_result = Smi::FromInt(EQUAL);
+ int prefix_length = x->length();
+ if (y->length() < prefix_length) {
+ prefix_length = y->length();
+ equal_prefix_result = Smi::FromInt(GREATER);
+ } else if (y->length() > prefix_length) {
+ equal_prefix_result = Smi::FromInt(LESS);
+ }
+ int r;
+ if (x->IsAsciiRepresentation()) {
+ Vector<const char> x_chars = x->ToAsciiVector();
+ if (y->IsAsciiRepresentation()) {
+ Vector<const char> y_chars = y->ToAsciiVector();
+ r = memcmp(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y->ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ } else {
+ Vector<const uc16> x_chars = x->ToUC16Vector();
+ if (y->IsAsciiRepresentation()) {
+ Vector<const char> y_chars = y->ToAsciiVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y->ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ }
+ Object* result;
+ if (r == 0) {
+ result = equal_prefix_result;
+ } else {
+ result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
+ }
+ ASSERT(result == StringInputBufferCompare(x, y));
+ return result;
+}
+
+
static Object* Runtime_StringCompare(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -4615,29 +4839,18 @@ static Object* Runtime_StringCompare(Arguments args) {
if (d < 0) return Smi::FromInt(LESS);
else if (d > 0) return Smi::FromInt(GREATER);
- x->TryFlattenIfNotFlat();
- y->TryFlattenIfNotFlat();
-
- static StringInputBuffer bufx;
- static StringInputBuffer bufy;
- bufx.Reset(x);
- bufy.Reset(y);
- while (bufx.has_more() && bufy.has_more()) {
- int d = bufx.GetNext() - bufy.GetNext();
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
- }
+ x->TryFlatten();
+ y->TryFlatten();
- // x is (non-trivial) prefix of y:
- if (bufy.has_more()) return Smi::FromInt(LESS);
- // y is prefix of x:
- return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+ return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
+ : StringInputBufferCompare(x, y);
}
static Object* Runtime_Math_abs(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_abs.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return Heap::AllocateHeapNumber(fabs(x));
@@ -4647,6 +4860,7 @@ static Object* Runtime_Math_abs(Arguments args) {
static Object* Runtime_Math_acos(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_acos.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::ACOS, x);
@@ -4656,6 +4870,7 @@ static Object* Runtime_Math_acos(Arguments args) {
static Object* Runtime_Math_asin(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_asin.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::ASIN, x);
@@ -4665,6 +4880,7 @@ static Object* Runtime_Math_asin(Arguments args) {
static Object* Runtime_Math_atan(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_atan.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::ATAN, x);
@@ -4674,6 +4890,7 @@ static Object* Runtime_Math_atan(Arguments args) {
static Object* Runtime_Math_atan2(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ Counters::math_atan2.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
@@ -4697,6 +4914,7 @@ static Object* Runtime_Math_atan2(Arguments args) {
static Object* Runtime_Math_ceil(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_ceil.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return Heap::NumberFromDouble(ceiling(x));
@@ -4706,6 +4924,7 @@ static Object* Runtime_Math_ceil(Arguments args) {
static Object* Runtime_Math_cos(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_cos.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::COS, x);
@@ -4715,6 +4934,7 @@ static Object* Runtime_Math_cos(Arguments args) {
static Object* Runtime_Math_exp(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_exp.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::EXP, x);
@@ -4724,6 +4944,7 @@ static Object* Runtime_Math_exp(Arguments args) {
static Object* Runtime_Math_floor(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_floor.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return Heap::NumberFromDouble(floor(x));
@@ -4733,6 +4954,7 @@ static Object* Runtime_Math_floor(Arguments args) {
static Object* Runtime_Math_log(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_log.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::LOG, x);
@@ -4773,6 +4995,7 @@ static double powi(double x, int y) {
static Object* Runtime_Math_pow(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ Counters::math_pow.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
@@ -4807,10 +5030,27 @@ static Object* Runtime_Math_pow(Arguments args) {
}
}
+// Fast version of Math.pow if we know that y is not an integer and
+// y is not -0.5 or 0.5. Used as slowcase from codegen.
+static Object* Runtime_Math_pow_cfunction(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_CHECKED(y, args[1]);
+ if (y == 0) {
+ return Smi::FromInt(1);
+ } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+ return Heap::nan_value();
+ } else {
+ return Heap::AllocateHeapNumber(pow(x, y));
+ }
+}
+
static Object* Runtime_Math_round(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_round.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
@@ -4823,6 +5063,7 @@ static Object* Runtime_Math_round(Arguments args) {
static Object* Runtime_Math_sin(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_sin.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::SIN, x);
@@ -4832,6 +5073,7 @@ static Object* Runtime_Math_sin(Arguments args) {
static Object* Runtime_Math_sqrt(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_sqrt.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return Heap::AllocateHeapNumber(sqrt(x));
@@ -4841,12 +5083,65 @@ static Object* Runtime_Math_sqrt(Arguments args) {
static Object* Runtime_Math_tan(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
+ Counters::math_tan.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return TranscendentalCache::Get(TranscendentalCache::TAN, x);
}
+static Object* Runtime_DateMakeDay(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 3);
+
+ CONVERT_SMI_CHECKED(year, args[0]);
+ CONVERT_SMI_CHECKED(month, args[1]);
+ CONVERT_SMI_CHECKED(date, args[2]);
+
+ static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+
+ year += month / 12;
+ month %= 12;
+ if (month < 0) {
+ year--;
+ month += 12;
+ }
+
+ ASSERT(month >= 0);
+ ASSERT(month < 12);
+
+ // year_delta is an arbitrary number such that:
+ // a) year_delta = -1 (mod 400)
+ // b) year + year_delta > 0 for years in the range defined by
+ // ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be overflow for 32-bit integers in the following
+ // operations.
+ static const int year_delta = 399999;
+ static const int base_day = 365 * (1970 + year_delta) +
+ (1970 + year_delta) / 4 -
+ (1970 + year_delta) / 100 +
+ (1970 + year_delta) / 400;
+
+ int year1 = year + year_delta;
+ int day_from_year = 365 * year1 +
+ year1 / 4 -
+ year1 / 100 +
+ year1 / 400 -
+ base_day;
+
+ if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
+ return Smi::FromInt(day_from_year + day_from_month[month] + date - 1);
+ }
+
+ return Smi::FromInt(day_from_year + day_from_month_leap[month] + date - 1);
+}
+
+
static Object* Runtime_NewArgumentsFast(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
@@ -5448,6 +5743,7 @@ static Object* Runtime_DebugPrint(Arguments args) {
}
args[0]->Print();
if (args[0]->IsHeapObject()) {
+ PrintF("\n");
HeapObject::cast(args[0])->map()->Print();
}
#else
@@ -7996,6 +8292,195 @@ static Object* Runtime_FunctionGetInferredName(Arguments args) {
return f->shared()->inferred_name();
}
+
+static int FindSharedFunctionInfosForScript(Script* script,
+ FixedArray* buffer) {
+ AssertNoAllocation no_allocations;
+
+ int counter = 0;
+ int buffer_size = buffer->length();
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ ASSERT(obj != NULL);
+ if (!obj->IsSharedFunctionInfo()) {
+ continue;
+ }
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->script() != script) {
+ continue;
+ }
+ if (counter < buffer_size) {
+ buffer->set(counter, shared);
+ }
+ counter++;
+ }
+ return counter;
+}
+
+// For a script finds all SharedFunctionInfo's in the heap that points
+// to this script. Returns JSArray of SharedFunctionInfo wrapped
+// in OpaqueReferences.
+static Object* Runtime_LiveEditFindSharedFunctionInfosForScript(
+ Arguments args) {
+ ASSERT(args.length() == 1);
+ HandleScope scope;
+ CONVERT_CHECKED(JSValue, script_value, args[0]);
+
+ Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
+
+ const int kBufferSize = 32;
+
+ Handle<FixedArray> array;
+ array = Factory::NewFixedArray(kBufferSize);
+ int number = FindSharedFunctionInfosForScript(*script, *array);
+ if (number > kBufferSize) {
+ array = Factory::NewFixedArray(number);
+ FindSharedFunctionInfosForScript(*script, *array);
+ }
+
+ Handle<JSArray> result = Factory::NewJSArrayWithElements(array);
+ result->set_length(Smi::FromInt(number));
+
+ LiveEdit::WrapSharedFunctionInfos(result);
+
+ return *result;
+}
+
+// For a script calculates compilation information about all its functions.
+// The script source is explicitly specified by the second argument.
+// The source of the actual script is not used, however it is important that
+// all generated code keeps references to this particular instance of script.
+// Returns a JSArray of compilation infos. The array is ordered so that
+// each function with all its descendant is always stored in a continues range
+// with the function itself going first. The root function is a script function.
+static Object* Runtime_LiveEditGatherCompileInfo(Arguments args) {
+ ASSERT(args.length() == 2);
+ HandleScope scope;
+ CONVERT_CHECKED(JSValue, script, args[0]);
+ CONVERT_ARG_CHECKED(String, source, 1);
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
+
+ if (Top::has_pending_exception()) {
+ return Failure::Exception();
+ }
+
+ return result;
+}
+
+// Changes the source of the script to a new_source and creates a new
+// script representing the old version of the script source.
+static Object* Runtime_LiveEditReplaceScript(Arguments args) {
+ ASSERT(args.length() == 3);
+ HandleScope scope;
+ CONVERT_CHECKED(JSValue, original_script_value, args[0]);
+ CONVERT_ARG_CHECKED(String, new_source, 1);
+ CONVERT_ARG_CHECKED(String, old_script_name, 2);
+ Handle<Script> original_script =
+ Handle<Script>(Script::cast(original_script_value->value()));
+
+ Handle<String> original_source(String::cast(original_script->source()));
+
+ original_script->set_source(*new_source);
+ Handle<Script> old_script = Factory::NewScript(original_source);
+ old_script->set_name(*old_script_name);
+ old_script->set_line_offset(original_script->line_offset());
+ old_script->set_column_offset(original_script->column_offset());
+ old_script->set_data(original_script->data());
+ old_script->set_type(original_script->type());
+ old_script->set_context_data(original_script->context_data());
+ old_script->set_compilation_type(original_script->compilation_type());
+ old_script->set_eval_from_shared(original_script->eval_from_shared());
+ old_script->set_eval_from_instructions_offset(
+ original_script->eval_from_instructions_offset());
+
+
+ Debugger::OnAfterCompile(old_script, Debugger::SEND_WHEN_DEBUGGING);
+
+ return *(GetScriptWrapper(old_script));
+}
+
+// Replaces code of SharedFunctionInfo with a new one.
+static Object* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
+ ASSERT(args.length() == 2);
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
+ CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
+
+ LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
+
+ return Heap::undefined_value();
+}
+
+// Connects SharedFunctionInfo to another script.
+static Object* Runtime_LiveEditRelinkFunctionToScript(Arguments args) {
+ ASSERT(args.length() == 2);
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(JSArray, shared_info_array, 0);
+ CONVERT_ARG_CHECKED(JSValue, script_value, 1);
+ Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
+
+ LiveEdit::RelinkFunctionToScript(shared_info_array, script);
+
+ return Heap::undefined_value();
+}
+
+// Updates positions of a shared function info (first parameter) according
+// to script source change. Text change is described in second parameter as
+// array of groups of 3 numbers:
+// (change_begin, change_end, change_end_new_position).
+// Each group describes a change in text; groups are sorted by change_begin.
+static Object* Runtime_LiveEditPatchFunctionPositions(Arguments args) {
+ ASSERT(args.length() == 2);
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
+ CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
+
+ LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
+
+ return Heap::undefined_value();
+}
+
+
+static LiveEdit::FunctionPatchabilityStatus FindFunctionCodeOnStacks(
+ Handle<SharedFunctionInfo> shared) {
+ // TODO(635): check all threads, not only the current one.
+ for (StackFrameIterator it; !it.done(); it.Advance()) {
+ StackFrame* frame = it.frame();
+ if (frame->code() == shared->code()) {
+ return LiveEdit::FUNCTION_BLOCKED_ON_STACK;
+ }
+ }
+ return LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH;
+}
+
+// For array of SharedFunctionInfo's (each wrapped in JSValue)
+// checks that none of them have activations on stacks (of any thread).
+// Returns array of the same length with corresponding results of
+// LiveEdit::FunctionPatchabilityStatus type.
+static Object* Runtime_LiveEditCheckStackActivations(Arguments args) {
+ ASSERT(args.length() == 1);
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
+
+
+ int len = Smi::cast(shared_array->length())->value();
+ Handle<JSArray> result = Factory::NewJSArray(len);
+
+ for (int i = 0; i < len; i++) {
+ JSValue* wrapper = JSValue::cast(shared_array->GetElement(i));
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(wrapper->value()));
+ LiveEdit::FunctionPatchabilityStatus check_res =
+ FindFunctionCodeOnStacks(shared);
+ SetElement(result, i, Handle<Smi>(Smi::FromInt(check_res)));
+ }
+
+ return *result;
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
#ifdef ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index e2e5c22124..8c2f86dfd7 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -60,6 +60,7 @@ namespace internal {
F(GetArgumentsProperty, 1, 1) \
F(ToFastProperties, 1, 1) \
F(ToSlowProperties, 1, 1) \
+ F(FinishArrayPrototypeSetup, 1, 1) \
\
F(IsInPrototypeChain, 2, 1) \
F(SetHiddenPrototype, 2, 1) \
@@ -142,6 +143,7 @@ namespace internal {
F(Math_floor, 1, 1) \
F(Math_log, 1, 1) \
F(Math_pow, 2, 1) \
+ F(Math_pow_cfunction, 2, 1) \
F(Math_round, 1, 1) \
F(Math_sin, 1, 1) \
F(Math_sqrt, 1, 1) \
@@ -161,6 +163,7 @@ namespace internal {
F(StringReplaceRegExpWithString, 4, 1) \
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
+ F(StringToArray, 1, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
@@ -200,6 +203,7 @@ namespace internal {
F(DateLocalTimezone, 1, 1) \
F(DateLocalTimeOffset, 0, 1) \
F(DateDaylightSavingsOffset, 1, 1) \
+ F(DateMakeDay, 3, 1) \
\
/* Numbers */ \
F(NumberIsFinite, 1, 1) \
@@ -321,7 +325,14 @@ namespace internal {
F(SystemBreak, 0, 1) \
F(DebugDisassembleFunction, 1, 1) \
F(DebugDisassembleConstructor, 1, 1) \
- F(FunctionGetInferredName, 1, 1)
+ F(FunctionGetInferredName, 1, 1) \
+ F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
+ F(LiveEditGatherCompileInfo, 2, 1) \
+ F(LiveEditReplaceScript, 3, 1) \
+ F(LiveEditReplaceFunctionCode, 2, 1) \
+ F(LiveEditRelinkFunctionToScript, 2, 1) \
+ F(LiveEditPatchFunctionPositions, 2, 1) \
+ F(LiveEditCheckStackActivations, 1, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
@@ -400,6 +411,7 @@ class Runtime : public AllStatic {
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
static Object* GetElementOrCharAt(Handle<Object> object, uint32_t index);
+ static Object* GetElement(Handle<Object> object, uint32_t index);
static Object* SetObjectProperty(Handle<Object> object,
Handle<Object> key,
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index e9d9848772..d7770414a3 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -478,7 +478,7 @@ function STRING_CHAR_AT(pos) {
if (!%_IsSmi(char_code)) {
return %StringCharAt(this, pos);
}
- return %CharFromCode(char_code);
+ return %_CharFromCode(char_code);
}
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index cf7e49f850..8943119019 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "ast.h"
+#include "handles.h"
#include "scanner.h"
namespace v8 {
@@ -86,12 +87,7 @@ void UTF8Buffer::AddCharSlow(uc32 c) {
UTF16Buffer::UTF16Buffer()
- : pos_(0), size_(0) { }
-
-
-Handle<String> UTF16Buffer::SubString(int start, int end) {
- return internal::SubString(data_, start, end);
-}
+ : pos_(0), end_(Scanner::kNoEndPosition) { }
// CharacterStreamUTF16Buffer
@@ -100,10 +96,14 @@ CharacterStreamUTF16Buffer::CharacterStreamUTF16Buffer()
void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
- unibrow::CharacterStream* input) {
- data_ = data;
- pos_ = 0;
+ unibrow::CharacterStream* input,
+ int start_position,
+ int end_position) {
stream_ = input;
+ if (start_position > 0) {
+ SeekForward(start_position);
+ }
+ end_ = end_position != Scanner::kNoEndPosition ? end_position : kMaxInt;
}
@@ -115,6 +115,8 @@ void CharacterStreamUTF16Buffer::PushBack(uc32 ch) {
uc32 CharacterStreamUTF16Buffer::Advance() {
+ ASSERT(end_ != Scanner::kNoEndPosition);
+ ASSERT(end_ >= 0);
// NOTE: It is of importance to Persian / Farsi resources that we do
// *not* strip format control characters in the scanner; see
//
@@ -126,7 +128,7 @@ uc32 CharacterStreamUTF16Buffer::Advance() {
if (!pushback_buffer()->is_empty()) {
pos_++;
return last_ = pushback_buffer()->RemoveLast();
- } else if (stream_->has_more()) {
+ } else if (stream_->has_more() && pos_ < end_) {
pos_++;
uc32 next = stream_->GetNext();
return last_ = next;
@@ -146,25 +148,32 @@ void CharacterStreamUTF16Buffer::SeekForward(int pos) {
}
-// TwoByteStringUTF16Buffer
-TwoByteStringUTF16Buffer::TwoByteStringUTF16Buffer()
+// ExternalStringUTF16Buffer
+template <typename StringType, typename CharType>
+ExternalStringUTF16Buffer<StringType, CharType>::ExternalStringUTF16Buffer()
: raw_data_(NULL) { }
-void TwoByteStringUTF16Buffer::Initialize(
- Handle<ExternalTwoByteString> data) {
+template <typename StringType, typename CharType>
+void ExternalStringUTF16Buffer<StringType, CharType>::Initialize(
+ Handle<StringType> data,
+ int start_position,
+ int end_position) {
ASSERT(!data.is_null());
-
- data_ = data;
- pos_ = 0;
-
raw_data_ = data->resource()->data();
- size_ = data->length();
+
+ ASSERT(end_position <= data->length());
+ if (start_position > 0) {
+ SeekForward(start_position);
+ }
+ end_ =
+ end_position != Scanner::kNoEndPosition ? end_position : data->length();
}
-uc32 TwoByteStringUTF16Buffer::Advance() {
- if (pos_ < size_) {
+template <typename StringType, typename CharType>
+uc32 ExternalStringUTF16Buffer<StringType, CharType>::Advance() {
+ if (pos_ < end_) {
return raw_data_[pos_++];
} else {
// note: currently the following increment is necessary to avoid a
@@ -175,14 +184,16 @@ uc32 TwoByteStringUTF16Buffer::Advance() {
}
-void TwoByteStringUTF16Buffer::PushBack(uc32 ch) {
+template <typename StringType, typename CharType>
+void ExternalStringUTF16Buffer<StringType, CharType>::PushBack(uc32 ch) {
pos_--;
ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
}
-void TwoByteStringUTF16Buffer::SeekForward(int pos) {
+template <typename StringType, typename CharType>
+void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
pos_ = pos;
}
@@ -327,21 +338,56 @@ Scanner::Scanner(ParserMode pre)
: stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { }
+void Scanner::Initialize(Handle<String> source,
+ ParserLanguage language) {
+ safe_string_input_buffer_.Reset(source.location());
+ Init(source, &safe_string_input_buffer_, 0, source->length(), language);
+}
+
+
+void Scanner::Initialize(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ ParserLanguage language) {
+ Init(source, stream, 0, kNoEndPosition, language);
+}
+
+
+void Scanner::Initialize(Handle<String> source,
+ int start_position,
+ int end_position,
+ ParserLanguage language) {
+ safe_string_input_buffer_.Reset(source.location());
+ Init(source, &safe_string_input_buffer_,
+ start_position, end_position, language);
+}
+
+
void Scanner::Init(Handle<String> source,
unibrow::CharacterStream* stream,
- int position,
+ int start_position,
+ int end_position,
ParserLanguage language) {
// Initialize the source buffer.
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
two_byte_string_buffer_.Initialize(
- Handle<ExternalTwoByteString>::cast(source));
+ Handle<ExternalTwoByteString>::cast(source),
+ start_position,
+ end_position);
source_ = &two_byte_string_buffer_;
+ } else if (!source.is_null() && StringShape(*source).IsExternalAscii()) {
+ ascii_string_buffer_.Initialize(
+ Handle<ExternalAsciiString>::cast(source),
+ start_position,
+ end_position);
+ source_ = &ascii_string_buffer_;
} else {
- char_stream_buffer_.Initialize(source, stream);
+ char_stream_buffer_.Initialize(source,
+ stream,
+ start_position,
+ end_position);
source_ = &char_stream_buffer_;
}
- position_ = position;
is_parsing_json_ = (language == JSON);
// Set c0_ (one character ahead)
@@ -358,11 +404,6 @@ void Scanner::Init(Handle<String> source,
}
-Handle<String> Scanner::SubString(int start, int end) {
- return source_->SubString(start - position_, end - position_);
-}
-
-
Token::Value Scanner::Next() {
// BUG 1215673: Find a thread safe way to set a stack limit in
// pre-parse mode. Otherwise, we cannot safely pre-parse from other
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index f0035c0eb3..d5efdff97c 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -84,32 +84,34 @@ class UTF8Buffer {
};
+// Interface through which the scanner reads characters from the input source.
class UTF16Buffer {
public:
UTF16Buffer();
virtual ~UTF16Buffer() {}
virtual void PushBack(uc32 ch) = 0;
- // returns a value < 0 when the buffer end is reached
+ // Returns a value < 0 when the buffer end is reached.
virtual uc32 Advance() = 0;
virtual void SeekForward(int pos) = 0;
int pos() const { return pos_; }
- int size() const { return size_; }
- Handle<String> SubString(int start, int end);
protected:
- Handle<String> data_;
- int pos_;
- int size_;
+ int pos_; // Current position in the buffer.
+ int end_; // Position where scanning should stop (EOF).
};
+// UTF16 buffer to read characters from a character stream.
class CharacterStreamUTF16Buffer: public UTF16Buffer {
public:
CharacterStreamUTF16Buffer();
virtual ~CharacterStreamUTF16Buffer() {}
- void Initialize(Handle<String> data, unibrow::CharacterStream* stream);
+ void Initialize(Handle<String> data,
+ unibrow::CharacterStream* stream,
+ int start_position,
+ int end_position);
virtual void PushBack(uc32 ch);
virtual uc32 Advance();
virtual void SeekForward(int pos);
@@ -123,17 +125,21 @@ class CharacterStreamUTF16Buffer: public UTF16Buffer {
};
-class TwoByteStringUTF16Buffer: public UTF16Buffer {
+// UTF16 buffer to read characters from an external string.
+template <typename StringType, typename CharType>
+class ExternalStringUTF16Buffer: public UTF16Buffer {
public:
- TwoByteStringUTF16Buffer();
- virtual ~TwoByteStringUTF16Buffer() {}
- void Initialize(Handle<ExternalTwoByteString> data);
+ ExternalStringUTF16Buffer();
+ virtual ~ExternalStringUTF16Buffer() {}
+ void Initialize(Handle<StringType> data,
+ int start_position,
+ int end_position);
virtual void PushBack(uc32 ch);
virtual uc32 Advance();
virtual void SeekForward(int pos);
private:
- const uint16_t* raw_data_;
+ const CharType* raw_data_; // Pointer to the actual array of characters.
};
@@ -263,11 +269,15 @@ class Scanner {
// Construction
explicit Scanner(ParserMode parse_mode);
- // Initialize the Scanner to scan source:
- void Init(Handle<String> source,
- unibrow::CharacterStream* stream,
- int position,
- ParserLanguage language);
+ // Initialize the Scanner to scan source.
+ void Initialize(Handle<String> source,
+ ParserLanguage language);
+ void Initialize(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ ParserLanguage language);
+ void Initialize(Handle<String> source,
+ int start_position, int end_position,
+ ParserLanguage language);
// Returns the next token.
Token::Value Next();
@@ -335,7 +345,6 @@ class Scanner {
// tokens, which is what it is used for.
void SeekForward(int pos);
- Handle<String> SubString(int start_pos, int end_pos);
bool stack_overflow() { return stack_overflow_; }
static StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
@@ -350,14 +359,28 @@ class Scanner {
static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
static const int kCharacterLookaheadBufferSize = 1;
+ static const int kNoEndPosition = 1;
private:
+ void Init(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ int start_position, int end_position,
+ ParserLanguage language);
+
+
+ // Different UTF16 buffers used to pull characters from. Based on input one of
+ // these will be initialized as the actual data source.
CharacterStreamUTF16Buffer char_stream_buffer_;
- TwoByteStringUTF16Buffer two_byte_string_buffer_;
+ ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
+ two_byte_string_buffer_;
+ ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
- // Source.
+ // Source. Will point to one of the buffers declared above.
UTF16Buffer* source_;
- int position_;
+
+ // Used to convert the source string into a character stream when a stream
+ // is not passed to the scanner.
+ SafeStringInputBuffer safe_string_input_buffer_;
// Buffer to hold literal values (identifiers, strings, numbers)
// using 0-terminated UTF-8 encoding.
@@ -460,7 +483,7 @@ class Scanner {
// Return the current source position.
int source_pos() {
- return source_->pos() - kCharacterLookaheadBufferSize + position_;
+ return source_->pos() - kCharacterLookaheadBufferSize;
}
// Decodes a unicode escape-sequence which is part of an identifier.
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index de1841b027..8b7e2ad972 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -82,7 +82,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
List<Variable*, Allocator> heap_locals(locals.length());
for (int i = 0; i < locals.length(); i++) {
Variable* var = locals[i];
- if (var->var_uses()->is_used()) {
+ if (var->is_used()) {
Slot* slot = var->slot();
if (slot != NULL) {
switch (slot->type()) {
@@ -130,7 +130,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
if (scope->is_function_scope()) {
Variable* var = scope->function();
if (var != NULL &&
- var->var_uses()->is_used() &&
+ var->is_used() &&
var->slot()->type() == Slot::CONTEXT) {
function_name_ = var->name();
// Note that we must not find the function name in the context slot
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 28d169a394..927ac66fc4 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -29,6 +29,7 @@
#define V8_SCOPEINFO_H_
#include "variables.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 701e5e3edd..b55e5d5d7b 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -309,7 +309,7 @@ void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
// which is the current user of this function).
for (int i = 0; i < temps_.length(); i++) {
Variable* var = temps_[i];
- if (var->var_uses()->is_used()) {
+ if (var->is_used()) {
locals->Add(var);
}
}
@@ -317,7 +317,7 @@ void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
- if (var->var_uses()->is_used()) {
+ if (var->is_used()) {
locals->Add(var);
}
}
@@ -418,17 +418,16 @@ static void PrintName(Handle<String> name) {
static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
- if (var->var_uses()->is_used() || var->rewrite() != NULL) {
+ if (var->is_used() || var->rewrite() != NULL) {
Indent(indent, Variable::Mode2String(var->mode()));
PrintF(" ");
PrintName(var->name());
PrintF("; // ");
- if (var->rewrite() != NULL) PrintF("%s, ", printer->Print(var->rewrite()));
- if (var->is_accessed_from_inner_scope()) PrintF("inner scope access, ");
- PrintF("var ");
- var->var_uses()->Print();
- PrintF(", obj ");
- var->obj_uses()->Print();
+ if (var->rewrite() != NULL) {
+ PrintF("%s, ", printer->Print(var->rewrite()));
+ if (var->is_accessed_from_inner_scope()) PrintF(", ");
+ }
+ if (var->is_accessed_from_inner_scope()) PrintF("inner scope access");
PrintF("\n");
}
}
@@ -738,10 +737,10 @@ bool Scope::MustAllocate(Variable* var) {
(var->is_accessed_from_inner_scope_ ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_)) {
- var->var_uses()->RecordAccess(1);
+ var->set_is_used(true);
}
// Global variables do not need to be allocated.
- return !var->is_global() && var->var_uses()->is_used();
+ return !var->is_global() && var->is_used();
}
@@ -847,7 +846,7 @@ void Scope::AllocateParameterLocals() {
new Literal(Handle<Object>(Smi::FromInt(i))),
RelocInfo::kNoPosition,
Property::SYNTHETIC);
- arguments_shadow->var_uses()->RecordUses(var->var_uses());
+ if (var->is_used()) arguments_shadow->set_is_used(true);
}
}
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 0819ec2244..a95a7d6aa7 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -359,79 +359,87 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
7,
"Heap::NewSpaceStart()");
- Add(ExternalReference::heap_always_allocate_scope_depth().address(),
+ Add(ExternalReference::new_space_mask().address(),
UNCLASSIFIED,
8,
+ "Heap::NewSpaceMask()");
+ Add(ExternalReference::heap_always_allocate_scope_depth().address(),
+ UNCLASSIFIED,
+ 9,
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address().address(),
UNCLASSIFIED,
- 9,
+ 10,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address().address(),
UNCLASSIFIED,
- 10,
+ 11,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break().address(),
UNCLASSIFIED,
- 11,
+ 12,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
- 12,
+ 13,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
- 13,
+ 14,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
- 14,
+ 15,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
- 15,
+ 16,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
- 16,
+ 17,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
- 17,
+ 18,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
- 18,
+ 19,
"compare_doubles");
#ifdef V8_NATIVE_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
- 19,
+ 20,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state().address(),
UNCLASSIFIED,
- 20,
+ 21,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack().address(),
UNCLASSIFIED,
- 21,
+ 22,
"NativeRegExpMacroAssembler::GrowStack()");
Add(ExternalReference::re_word_character_map().address(),
UNCLASSIFIED,
- 22,
+ 23,
"NativeRegExpMacroAssembler::word_character_map");
#endif
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,
- 23,
+ 24,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
UNCLASSIFIED,
- 24,
+ 25,
"KeyedLookupCache::field_offsets()");
+ Add(ExternalReference::transcendental_cache_array_address().address(),
+ UNCLASSIFIED,
+ 26,
+ "TranscendentalCache::caches()");
}
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 2c495d8529..08399ee8dd 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -1379,6 +1379,7 @@ static void ReportCodeKindStatistics() {
CASE(STORE_IC);
CASE(KEYED_STORE_IC);
CASE(CALL_IC);
+ CASE(BINARY_OP_IC);
}
}
@@ -1413,7 +1414,7 @@ static void ReportHistogram(bool print_spill) {
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (heap_histograms[i].number() > 0) {
- PrintF(" %-33s%10d (%10d bytes)\n",
+ PrintF(" %-34s%10d (%10d bytes)\n",
heap_histograms[i].name(),
heap_histograms[i].number(),
heap_histograms[i].bytes());
@@ -1430,7 +1431,7 @@ static void ReportHistogram(bool print_spill) {
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
- PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
+ PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
string_bytes);
}
@@ -1499,7 +1500,7 @@ void NewSpace::ReportStatistics() {
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (allocated_histogram_[i].number() > 0) {
- PrintF(" %-33s%10d (%10d bytes)\n",
+ PrintF(" %-34s%10d (%10d bytes)\n",
allocated_histogram_[i].name(),
allocated_histogram_[i].number(),
allocated_histogram_[i].bytes());
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
new file mode 100644
index 0000000000..16fe25e975
--- /dev/null
+++ b/deps/v8/src/splay-tree-inl.h
@@ -0,0 +1,276 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPLAY_TREE_INL_H_
+#define V8_SPLAY_TREE_INL_H_
+
+#include "splay-tree.h"
+
+namespace v8 {
+namespace internal {
+
+
+template<typename Config, class Allocator>
+SplayTree<Config, Allocator>::~SplayTree() {
+ NodeDeleter deleter;
+ ForEachNode(&deleter);
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
+ if (is_empty()) {
+ // If the tree is empty, insert the new node.
+ root_ = new Node(key, Config::kNoValue);
+ } else {
+ // Splay on the key to move the last node on the search path
+ // for the key to the root of the tree.
+ Splay(key);
+ // Ignore repeated insertions with the same key.
+ int cmp = Config::Compare(key, root_->key_);
+ if (cmp == 0) {
+ locator->bind(root_);
+ return false;
+ }
+ // Insert the new node.
+ Node* node = new Node(key, Config::kNoValue);
+ if (cmp > 0) {
+ node->left_ = root_;
+ node->right_ = root_->right_;
+ root_->right_ = NULL;
+ } else {
+ node->right_ = root_;
+ node->left_ = root_->left_;
+ root_->left_ = NULL;
+ }
+ root_ = node;
+ }
+ locator->bind(root_);
+ return true;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
+ if (is_empty())
+ return false;
+ Splay(key);
+ if (Config::Compare(key, root_->key_) == 0) {
+ locator->bind(root_);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindGreatestLessThan(const Key& key,
+ Locator* locator) {
+ if (is_empty())
+ return false;
+ // Splay on the key to move the node with the given key or the last
+ // node on the search path to the top of the tree.
+ Splay(key);
+ // Now the result is either the root node or the greatest node in
+ // the left subtree.
+ int cmp = Config::Compare(root_->key_, key);
+ if (cmp <= 0) {
+ locator->bind(root_);
+ return true;
+ } else {
+ Node* temp = root_;
+ root_ = root_->left_;
+ bool result = FindGreatest(locator);
+ root_ = temp;
+ return result;
+ }
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindLeastGreaterThan(const Key& key,
+ Locator* locator) {
+ if (is_empty())
+ return false;
+ // Splay on the key to move the node with the given key or the last
+ // node on the search path to the top of the tree.
+ Splay(key);
+ // Now the result is either the root node or the least node in
+ // the right subtree.
+ int cmp = Config::Compare(root_->key_, key);
+ if (cmp >= 0) {
+ locator->bind(root_);
+ return true;
+ } else {
+ Node* temp = root_;
+ root_ = root_->right_;
+ bool result = FindLeast(locator);
+ root_ = temp;
+ return result;
+ }
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
+ if (is_empty())
+ return false;
+ Node* current = root_;
+ while (current->right_ != NULL)
+ current = current->right_;
+ locator->bind(current);
+ return true;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
+ if (is_empty())
+ return false;
+ Node* current = root_;
+ while (current->left_ != NULL)
+ current = current->left_;
+ locator->bind(current);
+ return true;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Remove(const Key& key) {
+ // Bail if the tree is empty
+ if (is_empty())
+ return false;
+ // Splay on the key to move the node with the given key to the top.
+ Splay(key);
+ // Bail if the key is not in the tree
+ if (Config::Compare(key, root_->key_) != 0)
+ return false;
+ if (root_->left_ == NULL) {
+ // No left child, so the new tree is just the right child.
+ root_ = root_->right_;
+ } else {
+ // Left child exists.
+ Node* right = root_->right_;
+ // Make the original left child the new root.
+ root_ = root_->left_;
+ // Splay to make sure that the new root has an empty right child.
+ Splay(key);
+ // Insert the original right child as the right child of the new
+ // root.
+ root_->right_ = right;
+ }
+ return true;
+}
+
+
+template<typename Config, class Allocator>
+void SplayTree<Config, Allocator>::Splay(const Key& key) {
+ if (is_empty())
+ return;
+ Node dummy_node(Config::kNoKey, Config::kNoValue);
+ // Create a dummy node. The use of the dummy node is a bit
+ // counter-intuitive: The right child of the dummy node will hold
+ // the L tree of the algorithm. The left child of the dummy node
+ // will hold the R tree of the algorithm. Using a dummy node, left
+ // and right will always be nodes and we avoid special cases.
+ Node* dummy = &dummy_node;
+ Node* left = dummy;
+ Node* right = dummy;
+ Node* current = root_;
+ while (true) {
+ int cmp = Config::Compare(key, current->key_);
+ if (cmp < 0) {
+ if (current->left_ == NULL)
+ break;
+ if (Config::Compare(key, current->left_->key_) < 0) {
+ // Rotate right.
+ Node* temp = current->left_;
+ current->left_ = temp->right_;
+ temp->right_ = current;
+ current = temp;
+ if (current->left_ == NULL)
+ break;
+ }
+ // Link right.
+ right->left_ = current;
+ right = current;
+ current = current->left_;
+ } else if (cmp > 0) {
+ if (current->right_ == NULL)
+ break;
+ if (Config::Compare(key, current->right_->key_) > 0) {
+ // Rotate left.
+ Node* temp = current->right_;
+ current->right_ = temp->left_;
+ temp->left_ = current;
+ current = temp;
+ if (current->right_ == NULL)
+ break;
+ }
+ // Link left.
+ left->right_ = current;
+ left = current;
+ current = current->right_;
+ } else {
+ break;
+ }
+ }
+ // Assemble.
+ left->right_ = current->left_;
+ right->left_ = current->right_;
+ current->left_ = dummy->right_;
+ current->right_ = dummy->left_;
+ root_ = current;
+}
+
+
+template <typename Config, class Allocator> template <class Callback>
+void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
+ NodeToPairAdaptor<Callback> callback_adaptor(callback);
+ ForEachNode(&callback_adaptor);
+}
+
+
+template <typename Config, class Allocator> template <class Callback>
+void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
+ // Pre-allocate some space for tiny trees.
+ List<Node*, Allocator> nodes_to_visit(10);
+ if (root_ != NULL) nodes_to_visit.Add(root_);
+ int pos = 0;
+ while (pos < nodes_to_visit.length()) {
+ Node* node = nodes_to_visit[pos++];
+ if (node->left() != NULL) nodes_to_visit.Add(node->left());
+ if (node->right() != NULL) nodes_to_visit.Add(node->right());
+ callback->Call(node);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_SPLAY_TREE_INL_H_
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
new file mode 100644
index 0000000000..b0f415d827
--- /dev/null
+++ b/deps/v8/src/splay-tree.h
@@ -0,0 +1,191 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPLAY_TREE_H_
+#define V8_SPLAY_TREE_H_
+
+namespace v8 {
+namespace internal {
+
+
+// A splay tree. The config type parameter encapsulates the different
+// configurations of a concrete splay tree:
+//
+// typedef Key: the key type
+// typedef Value: the value type
+// static const kNoKey: the dummy key used when no key is set
+// static const kNoValue: the dummy value used to initialize nodes
+// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
+//
+// The tree is also parameterized by an allocation policy
+// (Allocator). The policy is used for allocating lists in the C free
+// store or the zone; see zone.h.
+
+// Forward defined as
+// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
+// class SplayTree;
+template <typename Config, class Allocator>
+class SplayTree {
+ public:
+ typedef typename Config::Key Key;
+ typedef typename Config::Value Value;
+
+ class Locator;
+
+ SplayTree() : root_(NULL) { }
+ ~SplayTree();
+
+ INLINE(void* operator new(size_t size)) {
+ return Allocator::New(static_cast<int>(size));
+ }
+ INLINE(void operator delete(void* p, size_t)) { return Allocator::Delete(p); }
+
+ // Inserts the given key in this tree with the given value. Returns
+ // true if a node was inserted, otherwise false. If found the locator
+ // is enabled and provides access to the mapping for the key.
+ bool Insert(const Key& key, Locator* locator);
+
+ // Looks up the key in this tree and returns true if it was found,
+ // otherwise false. If the node is found the locator is enabled and
+ // provides access to the mapping for the key.
+ bool Find(const Key& key, Locator* locator);
+
+ // Finds the mapping with the greatest key less than or equal to the
+ // given key.
+ bool FindGreatestLessThan(const Key& key, Locator* locator);
+
+ // Find the mapping with the greatest key in this tree.
+ bool FindGreatest(Locator* locator);
+
+ // Finds the mapping with the least key greater than or equal to the
+ // given key.
+ bool FindLeastGreaterThan(const Key& key, Locator* locator);
+
+ // Find the mapping with the least key in this tree.
+ bool FindLeast(Locator* locator);
+
+ // Remove the node with the given key from the tree.
+ bool Remove(const Key& key);
+
+ bool is_empty() { return root_ == NULL; }
+
+ // Perform the splay operation for the given key. Moves the node with
+ // the given key to the top of the tree. If no node has the given
+ // key, the last node on the search path is moved to the top of the
+ // tree.
+ void Splay(const Key& key);
+
+ class Node {
+ public:
+ Node(const Key& key, const Value& value)
+ : key_(key),
+ value_(value),
+ left_(NULL),
+ right_(NULL) { }
+
+ INLINE(void* operator new(size_t size)) {
+ return Allocator::New(static_cast<int>(size));
+ }
+ INLINE(void operator delete(void* p, size_t)) {
+ return Allocator::Delete(p);
+ }
+
+ Key key() { return key_; }
+ Value value() { return value_; }
+ Node* left() { return left_; }
+ Node* right() { return right_; }
+ private:
+
+ friend class SplayTree;
+ friend class Locator;
+ Key key_;
+ Value value_;
+ Node* left_;
+ Node* right_;
+ };
+
+ // A locator provides access to a node in the tree without actually
+ // exposing the node.
+ class Locator BASE_EMBEDDED {
+ public:
+ explicit Locator(Node* node) : node_(node) { }
+ Locator() : node_(NULL) { }
+ const Key& key() { return node_->key_; }
+ Value& value() { return node_->value_; }
+ void set_value(const Value& value) { node_->value_ = value; }
+ inline void bind(Node* node) { node_ = node; }
+ private:
+ Node* node_;
+ };
+
+ template <class Callback>
+ void ForEach(Callback* callback);
+
+ protected:
+
+ // Resets tree root. Existing nodes become unreachable.
+ void ResetRoot() { root_ = NULL; }
+
+ private:
+
+ template<class Callback>
+ class NodeToPairAdaptor BASE_EMBEDDED {
+ public:
+ explicit NodeToPairAdaptor(Callback* callback)
+ : callback_(callback) { }
+ void Call(Node* node) {
+ callback_->Call(node->key(), node->value());
+ }
+
+ private:
+ Callback* callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
+ };
+
+ class NodeDeleter BASE_EMBEDDED {
+ public:
+ NodeDeleter() { }
+ void Call(Node* node) { delete node; }
+
+ private:
+
+ DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
+ };
+
+ template <class Callback>
+ void ForEachNode(Callback* callback);
+
+ Node* root_;
+
+ DISALLOW_COPY_AND_ASSIGN(SplayTree);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_SPLAY_TREE_H_
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 49f403de7d..067578c141 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -69,7 +69,7 @@ function StringCharAt(pos) {
if (index >= subject.length || index < 0) return "";
char_code = %StringCharCodeAt(subject, index);
}
- return %CharFromCode(char_code);
+ return %_CharFromCode(char_code);
}
@@ -184,7 +184,7 @@ function SubString(string, start, end) {
if (!%_IsSmi(char_code)) {
char_code = %StringCharCodeAt(string, start);
}
- return %CharFromCode(char_code);
+ return %_CharFromCode(char_code);
}
return %_SubString(string, start, end);
}
@@ -530,11 +530,7 @@ function StringSplit(separator, limit) {
var separator_length = separator.length;
// If the separator string is empty then return the elements in the subject.
- if (separator_length === 0) {
- var result = $Array(length);
- for (var i = 0; i < length; i++) result[i] = subject[i];
- return result;
- }
+ if (separator_length === 0) return %StringToArray(subject);
var result = [];
var start_index = 0;
@@ -726,7 +722,7 @@ function StringTrimRight() {
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
- if (n == 1) return %CharFromCode(ToNumber(code) & 0xffff)
+ if (n == 1) return %_CharFromCode(ToNumber(code) & 0xffff)
// NOTE: This is not super-efficient, but it is necessary because we
// want to avoid converting to numbers from within the virtual
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index b9db4be52e..7c8a1c96eb 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
+#include "messages.h"
#include "platform.h"
#include "simulator.h"
#include "string-stream.h"
diff --git a/deps/v8/src/usage-analyzer.cc b/deps/v8/src/usage-analyzer.cc
deleted file mode 100644
index 74cf9828ae..0000000000
--- a/deps/v8/src/usage-analyzer.cc
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-#include "usage-analyzer.h"
-
-namespace v8 {
-namespace internal {
-
-// Weight boundaries
-static const int MinWeight = 1;
-static const int MaxWeight = 1000000;
-static const int InitialWeight = 100;
-
-
-class UsageComputer: public AstVisitor {
- public:
- static bool Traverse(AstNode* node);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- void VisitVariable(Variable* var);
-
- private:
- int weight_;
- bool is_write_;
-
- UsageComputer(int weight, bool is_write);
- virtual ~UsageComputer();
-
- // Helper functions
- void RecordUses(UseCount* uses);
- void Read(Expression* x);
- void Write(Expression* x);
- void ReadList(ZoneList<Expression*>* list);
- void ReadList(ZoneList<ObjectLiteral::Property*>* list);
-
- friend class WeightScaler;
-};
-
-
-class WeightScaler BASE_EMBEDDED {
- public:
- WeightScaler(UsageComputer* uc, float scale);
- ~WeightScaler();
-
- private:
- UsageComputer* uc_;
- int old_weight_;
-};
-
-
-// ----------------------------------------------------------------------------
-// Implementation of UsageComputer
-
-bool UsageComputer::Traverse(AstNode* node) {
- UsageComputer uc(InitialWeight, false);
- uc.Visit(node);
- return !uc.HasStackOverflow();
-}
-
-
-void UsageComputer::VisitBlock(Block* node) {
- VisitStatements(node->statements());
-}
-
-
-void UsageComputer::VisitDeclaration(Declaration* node) {
- Write(node->proxy());
- if (node->fun() != NULL)
- VisitFunctionLiteral(node->fun());
-}
-
-
-void UsageComputer::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
-}
-
-
-void UsageComputer::VisitEmptyStatement(EmptyStatement* node) {
- // nothing to do
-}
-
-
-void UsageComputer::VisitIfStatement(IfStatement* node) {
- Read(node->condition());
- { WeightScaler ws(this, 0.5); // executed 50% of the time
- Visit(node->then_statement());
- Visit(node->else_statement());
- }
-}
-
-
-void UsageComputer::VisitContinueStatement(ContinueStatement* node) {
- // nothing to do
-}
-
-
-void UsageComputer::VisitBreakStatement(BreakStatement* node) {
- // nothing to do
-}
-
-
-void UsageComputer::VisitReturnStatement(ReturnStatement* node) {
- Read(node->expression());
-}
-
-
-void UsageComputer::VisitWithEnterStatement(WithEnterStatement* node) {
- Read(node->expression());
-}
-
-
-void UsageComputer::VisitWithExitStatement(WithExitStatement* node) {
- // nothing to do
-}
-
-
-void UsageComputer::VisitSwitchStatement(SwitchStatement* node) {
- Read(node->tag());
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = cases->length(); i-- > 0;) {
- WeightScaler ws(this, static_cast<float>(1.0 / cases->length()));
- CaseClause* clause = cases->at(i);
- if (!clause->is_default())
- Read(clause->label());
- VisitStatements(clause->statements());
- }
-}
-
-
-void UsageComputer::VisitDoWhileStatement(DoWhileStatement* node) {
- WeightScaler ws(this, 10.0);
- Read(node->cond());
- Visit(node->body());
-}
-
-
-void UsageComputer::VisitWhileStatement(WhileStatement* node) {
- WeightScaler ws(this, 10.0);
- Read(node->cond());
- Visit(node->body());
-}
-
-
-void UsageComputer::VisitForStatement(ForStatement* node) {
- if (node->init() != NULL) Visit(node->init());
- { WeightScaler ws(this, 10.0); // executed in each iteration
- if (node->cond() != NULL) Read(node->cond());
- if (node->next() != NULL) Visit(node->next());
- Visit(node->body());
- }
-}
-
-
-void UsageComputer::VisitForInStatement(ForInStatement* node) {
- WeightScaler ws(this, 10.0);
- Write(node->each());
- Read(node->enumerable());
- Visit(node->body());
-}
-
-
-void UsageComputer::VisitTryCatchStatement(TryCatchStatement* node) {
- Visit(node->try_block());
- { WeightScaler ws(this, 0.25);
- Write(node->catch_var());
- Visit(node->catch_block());
- }
-}
-
-
-void UsageComputer::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Visit(node->try_block());
- Visit(node->finally_block());
-}
-
-
-void UsageComputer::VisitDebuggerStatement(DebuggerStatement* node) {
-}
-
-
-void UsageComputer::VisitFunctionLiteral(FunctionLiteral* node) {
- ZoneList<Declaration*>* decls = node->scope()->declarations();
- for (int i = 0; i < decls->length(); i++) VisitDeclaration(decls->at(i));
- VisitStatements(node->body());
-}
-
-
-void UsageComputer::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* node) {
- // Do nothing.
-}
-
-
-void UsageComputer::VisitConditional(Conditional* node) {
- Read(node->condition());
- { WeightScaler ws(this, 0.5);
- Read(node->then_expression());
- Read(node->else_expression());
- }
-}
-
-
-void UsageComputer::VisitSlot(Slot* node) {
- UNREACHABLE();
-}
-
-
-void UsageComputer::VisitVariable(Variable* node) {
- RecordUses(node->var_uses());
-}
-
-
-void UsageComputer::VisitVariableProxy(VariableProxy* node) {
- // The proxy may refer to a variable in which case it was bound via
- // VariableProxy::BindTo.
- RecordUses(node->var_uses());
-}
-
-
-void UsageComputer::VisitLiteral(Literal* node) {
- // nothing to do
-}
-
-void UsageComputer::VisitRegExpLiteral(RegExpLiteral* node) {
- // nothing to do
-}
-
-
-void UsageComputer::VisitObjectLiteral(ObjectLiteral* node) {
- ReadList(node->properties());
-}
-
-
-void UsageComputer::VisitArrayLiteral(ArrayLiteral* node) {
- ReadList(node->values());
-}
-
-
-void UsageComputer::VisitCatchExtensionObject(CatchExtensionObject* node) {
- Read(node->value());
-}
-
-
-void UsageComputer::VisitAssignment(Assignment* node) {
- if (node->op() != Token::ASSIGN)
- Read(node->target());
- Write(node->target());
- Read(node->value());
-}
-
-
-void UsageComputer::VisitThrow(Throw* node) {
- Read(node->exception());
-}
-
-
-void UsageComputer::VisitProperty(Property* node) {
- // In any case (read or write) we read both the
- // node's object and the key.
- Read(node->obj());
- Read(node->key());
- // If the node's object is a variable proxy,
- // we have a 'simple' object property access. We count
- // the access via the variable or proxy's object uses.
- VariableProxy* proxy = node->obj()->AsVariableProxy();
- if (proxy != NULL) {
- RecordUses(proxy->obj_uses());
- }
-}
-
-
-void UsageComputer::VisitCall(Call* node) {
- Read(node->expression());
- ReadList(node->arguments());
-}
-
-
-void UsageComputer::VisitCallNew(CallNew* node) {
- Read(node->expression());
- ReadList(node->arguments());
-}
-
-
-void UsageComputer::VisitCallRuntime(CallRuntime* node) {
- ReadList(node->arguments());
-}
-
-
-void UsageComputer::VisitUnaryOperation(UnaryOperation* node) {
- Read(node->expression());
-}
-
-
-void UsageComputer::VisitCountOperation(CountOperation* node) {
- Read(node->expression());
- Write(node->expression());
-}
-
-
-void UsageComputer::VisitBinaryOperation(BinaryOperation* node) {
- Read(node->left());
- Read(node->right());
-}
-
-
-void UsageComputer::VisitCompareOperation(CompareOperation* node) {
- Read(node->left());
- Read(node->right());
-}
-
-
-void UsageComputer::VisitThisFunction(ThisFunction* node) {
-}
-
-
-UsageComputer::UsageComputer(int weight, bool is_write) {
- weight_ = weight;
- is_write_ = is_write;
-}
-
-
-UsageComputer::~UsageComputer() {
- // nothing to do
-}
-
-
-void UsageComputer::RecordUses(UseCount* uses) {
- if (is_write_)
- uses->RecordWrite(weight_);
- else
- uses->RecordRead(weight_);
-}
-
-
-void UsageComputer::Read(Expression* x) {
- if (is_write_) {
- UsageComputer uc(weight_, false);
- uc.Visit(x);
- } else {
- Visit(x);
- }
-}
-
-
-void UsageComputer::Write(Expression* x) {
- if (!is_write_) {
- UsageComputer uc(weight_, true);
- uc.Visit(x);
- } else {
- Visit(x);
- }
-}
-
-
-void UsageComputer::ReadList(ZoneList<Expression*>* list) {
- for (int i = list->length(); i-- > 0; )
- Read(list->at(i));
-}
-
-
-void UsageComputer::ReadList(ZoneList<ObjectLiteral::Property*>* list) {
- for (int i = list->length(); i-- > 0; )
- Read(list->at(i)->value());
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of WeightScaler
-
-WeightScaler::WeightScaler(UsageComputer* uc, float scale) {
- uc_ = uc;
- old_weight_ = uc->weight_;
- int new_weight = static_cast<int>(uc->weight_ * scale);
- if (new_weight <= 0) new_weight = MinWeight;
- else if (new_weight > MaxWeight) new_weight = MaxWeight;
- uc->weight_ = new_weight;
-}
-
-
-WeightScaler::~WeightScaler() {
- uc_->weight_ = old_weight_;
-}
-
-
-// ----------------------------------------------------------------------------
-// Interface to variable usage analysis
-
-bool AnalyzeVariableUsage(FunctionLiteral* lit) {
- if (!FLAG_usage_computation) return true;
- HistogramTimerScope timer(&Counters::usage_analysis);
- return UsageComputer::Traverse(lit);
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 2fcd241fd5..deab09fb11 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -528,11 +528,11 @@ static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
- // Number of characters in a uint32_t.
- static const int kStepSize = sizeof(uint32_t) / sizeof(*dest); // NOLINT
+ // Number of characters in a uintptr_t.
+ static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
while (dest <= limit - kStepSize) {
- *reinterpret_cast<uint32_t*>(dest) =
- *reinterpret_cast<const uint32_t*>(src);
+ *reinterpret_cast<uintptr_t*>(dest) =
+ *reinterpret_cast<const uintptr_t*>(src);
dest += kStepSize;
src += kStepSize;
}
@@ -544,6 +544,58 @@ static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
+// Compare ASCII/16bit chars to ASCII/16bit chars.
+template <typename lchar, typename rchar>
+static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+ const lchar* limit = lhs + chars;
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ if (sizeof(*lhs) == sizeof(*rhs)) {
+ // Number of characters in a uintptr_t.
+ static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
+ while (lhs <= limit - kStepSize) {
+ if (*reinterpret_cast<const uintptr_t*>(lhs) !=
+ *reinterpret_cast<const uintptr_t*>(rhs)) {
+ break;
+ }
+ lhs += kStepSize;
+ rhs += kStepSize;
+ }
+ }
+#endif
+ while (lhs < limit) {
+ int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
+ if (r != 0) return r;
+ ++lhs;
+ ++rhs;
+ }
+ return 0;
+}
+
+
+template <typename T>
+static inline void MemsetPointer(T** dest, T* value, int counter) {
+#if defined(V8_HOST_ARCH_IA32)
+#define STOS "stosl"
+#elif defined(V8_HOST_ARCH_X64)
+#define STOS "stosq"
+#endif
+
+#if defined(__GNUC__) && defined(STOS)
+ asm("cld;"
+ "rep ; " STOS
+ : /* no output */
+ : "c" (counter), "a" (value), "D" (dest)
+ : /* no clobbered list as all inputs are considered clobbered */);
+#else
+ for (int i = 0; i < counter; i++) {
+ dest[i] = value;
+ }
+#endif
+
+#undef STOS
+}
+
+
// Calculate 10^exponent.
int TenToThe(int exponent);
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index eaac2dbbbe..2b493fbcdd 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -169,7 +169,24 @@ namespace internal {
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
SC(regexp_entry_native, V8.RegExpEntryNative) \
SC(number_to_string_native, V8.NumberToStringNative) \
- SC(number_to_string_runtime, V8.NumberToStringRuntime)
+ SC(number_to_string_runtime, V8.NumberToStringRuntime) \
+ SC(math_abs, V8.MathAbs) \
+ SC(math_acos, V8.MathAcos) \
+ SC(math_asin, V8.MathAsin) \
+ SC(math_atan, V8.MathAtan) \
+ SC(math_atan2, V8.MathAtan2) \
+ SC(math_ceil, V8.MathCeil) \
+ SC(math_cos, V8.MathCos) \
+ SC(math_exp, V8.MathExp) \
+ SC(math_floor, V8.MathFloor) \
+ SC(math_log, V8.MathLog) \
+ SC(math_pow, V8.MathPow) \
+ SC(math_round, V8.MathRound) \
+ SC(math_sin, V8.MathSin) \
+ SC(math_sqrt, V8.MathSqrt) \
+ SC(math_tan, V8.MathTan) \
+ SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
+ SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)
// This file contains all the v8 counters that are in use.
class Counters : AllStatic {
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index b3624c5d54..d58f30f3d4 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -67,7 +67,7 @@
#include "spaces-inl.h"
#include "heap-inl.h"
#include "log-inl.h"
-#include "messages.h"
+#include "handles-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 3bcd48a8b2..f46a54d6ef 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -35,57 +35,6 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// Implementation UseCount.
-
-UseCount::UseCount()
- : nreads_(0),
- nwrites_(0) {
-}
-
-
-void UseCount::RecordRead(int weight) {
- ASSERT(weight > 0);
- nreads_ += weight;
- // We must have a positive nreads_ here. Handle
- // any kind of overflow by setting nreads_ to
- // some large-ish value.
- if (nreads_ <= 0) nreads_ = 1000000;
- ASSERT(is_read() & is_used());
-}
-
-
-void UseCount::RecordWrite(int weight) {
- ASSERT(weight > 0);
- nwrites_ += weight;
- // We must have a positive nwrites_ here. Handle
- // any kind of overflow by setting nwrites_ to
- // some large-ish value.
- if (nwrites_ <= 0) nwrites_ = 1000000;
- ASSERT(is_written() && is_used());
-}
-
-
-void UseCount::RecordAccess(int weight) {
- RecordRead(weight);
- RecordWrite(weight);
-}
-
-
-void UseCount::RecordUses(UseCount* uses) {
- if (uses->nreads() > 0) RecordRead(uses->nreads());
- if (uses->nwrites() > 0) RecordWrite(uses->nwrites());
-}
-
-
-#ifdef DEBUG
-void UseCount::Print() {
- // PrintF("r = %d, w = %d", nreads_, nwrites_);
- PrintF("%du = %dr + %dw", nuses(), nreads(), nwrites());
-}
-#endif
-
-
-// ----------------------------------------------------------------------------
// Implementation StaticType.
@@ -136,6 +85,12 @@ Slot* Variable::slot() const {
}
+bool Variable::IsStackAllocated() const {
+ Slot* s = slot();
+ return s != NULL && s->IsStackAllocated();
+}
+
+
Variable::Variable(Scope* scope,
Handle<String> name,
Mode mode,
@@ -148,6 +103,7 @@ Variable::Variable(Scope* scope,
kind_(kind),
local_if_not_shadowed_(NULL),
is_accessed_from_inner_scope_(false),
+ is_used_(false),
rewrite_(NULL) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index ac7f2940dd..a68aa337f4 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -33,35 +33,6 @@
namespace v8 {
namespace internal {
-class UseCount BASE_EMBEDDED {
- public:
- UseCount();
-
- // Inform the node of a "use". The weight can be used to indicate
- // heavier use, for instance if the variable is accessed inside a loop.
- void RecordRead(int weight);
- void RecordWrite(int weight);
- void RecordAccess(int weight); // records a read & write
- void RecordUses(UseCount* uses);
-
- int nreads() const { return nreads_; }
- int nwrites() const { return nwrites_; }
- int nuses() const { return nreads_ + nwrites_; }
-
- bool is_read() const { return nreads() > 0; }
- bool is_written() const { return nwrites() > 0; }
- bool is_used() const { return nuses() > 0; }
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- int nreads_;
- int nwrites_;
-};
-
-
// Variables and AST expression nodes can track their "type" to enable
// optimizations and removal of redundant checks when generating code.
@@ -168,13 +139,15 @@ class Variable: public ZoneObject {
bool is_accessed_from_inner_scope() const {
return is_accessed_from_inner_scope_;
}
- UseCount* var_uses() { return &var_uses_; }
- UseCount* obj_uses() { return &obj_uses_; }
+ bool is_used() { return is_used_; }
+ void set_is_used(bool flag) { is_used_ = flag; }
bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
+ bool IsStackAllocated() const;
+
bool is_dynamic() const {
return (mode_ == DYNAMIC ||
mode_ == DYNAMIC_GLOBAL ||
@@ -216,8 +189,7 @@ class Variable: public ZoneObject {
// Usage info.
bool is_accessed_from_inner_scope_; // set by variable resolver
- UseCount var_uses_; // uses of the variable value
- UseCount obj_uses_; // uses of the object the variable points to
+ bool is_used_;
// Static type information
StaticType type_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index a9d9d0449c..79832eaef7 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
-#define BUILD_NUMBER 2
-#define PATCH_LEVEL 6
+#define BUILD_NUMBER 3
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/deps/v8/src/virtual-frame-inl.h b/deps/v8/src/virtual-frame-inl.h
index 3476e41fbb..9aa88fe184 100644
--- a/deps/v8/src/virtual-frame-inl.h
+++ b/deps/v8/src/virtual-frame-inl.h
@@ -33,6 +33,21 @@
namespace v8 {
namespace internal {
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address. All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
+ for (int i = 0; i <= stack_pointer_; i++) {
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+ }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ register_locations_[i] = kIllegalIndex;
+ }
+}
+
+
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
: elements_(original->element_count()),
@@ -50,7 +65,7 @@ void VirtualFrame::PushFrameSlotAt(int index) {
}
-void VirtualFrame::Push(Register reg, NumberInfo::Type info) {
+void VirtualFrame::Push(Register reg, NumberInfo info) {
if (is_used(reg)) {
int index = register_location(reg);
FrameElement element = CopyElementAt(index, info);
diff --git a/deps/v8/src/virtual-frame.cc b/deps/v8/src/virtual-frame.cc
index d5b5f42ae8..8c13a6a862 100644
--- a/deps/v8/src/virtual-frame.cc
+++ b/deps/v8/src/virtual-frame.cc
@@ -43,7 +43,7 @@ namespace internal {
// not conflict with the existing type information and must be equally or
// more precise. The default parameter value kUninitialized means that there
// is no additional information.
-FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo::Type info) {
+FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo info) {
ASSERT(index >= 0);
ASSERT(index < element_count());
@@ -74,14 +74,14 @@ FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo::Type info) {
result.set_index(index);
elements_[index].set_copied();
// Update backing element's number information.
- NumberInfo::Type existing = elements_[index].number_info();
- ASSERT(existing != NumberInfo::kUninitialized);
+ NumberInfo existing = elements_[index].number_info();
+ ASSERT(!existing.IsUninitialized());
// Assert that the new type information (a) does not conflict with the
// existing one and (b) is equally or more precise.
- ASSERT((info == NumberInfo::kUninitialized) ||
- (existing | info) != NumberInfo::kUninitialized);
- ASSERT(existing <= info);
- elements_[index].set_number_info(info != NumberInfo::kUninitialized
+ ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt());
+ ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt());
+
+ elements_[index].set_number_info(!info.IsUninitialized()
? info
: existing);
break;
@@ -104,7 +104,7 @@ void VirtualFrame::Adjust(int count) {
ASSERT(stack_pointer_ == element_count() - 1);
for (int i = 0; i < count; i++) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
}
stack_pointer_ += count;
}
@@ -152,7 +152,7 @@ void VirtualFrame::SpillElementAt(int index) {
SyncElementAt(index);
// Number type information is preserved.
// Copies get their number information from their backing element.
- NumberInfo::Type info;
+ NumberInfo info;
if (!elements_[index].is_copy()) {
info = elements_[index].number_info();
} else {
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index b3c5e33fd5..0c5d481f5a 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -61,10 +61,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToRuntime expects rax to contain the number of arguments
+ // JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
__ addq(rax, Immediate(num_extra_args + 1));
- __ JumpToRuntime(ExternalReference(id), 1);
+ __ JumpToExternalReference(ExternalReference(id), 1);
}
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 6ffb5ac7f7..b10c3f9cf3 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -278,6 +278,7 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(info->function());
+ Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
// Initialize state.
info_ = info;
@@ -3876,6 +3877,49 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateCharFromCode");
+ ASSERT(args->length() == 1);
+
+ Load(args->at(0));
+ Result code = frame_->Pop();
+ code.ToRegister();
+ ASSERT(code.is_valid());
+
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+
+ JumpTarget slow_case;
+ JumpTarget exit;
+
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ Condition is_smi = __ CheckSmi(code.reg());
+ slow_case.Branch(NegateCondition(is_smi), &code, not_taken);
+
+ __ SmiToInteger32(kScratchRegister, code.reg());
+ __ cmpl(kScratchRegister, Immediate(String::kMaxAsciiCharCode));
+ slow_case.Branch(above, &code, not_taken);
+
+ __ Move(temp.reg(), Factory::single_character_string_cache());
+ __ movq(temp.reg(), FieldOperand(temp.reg(),
+ kScratchRegister, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
+ slow_case.Branch(equal, &code, not_taken);
+ code.Unuse();
+
+ frame_->Push(&temp);
+ exit.Jump();
+
+ slow_case.Bind(&code);
+ frame_->Push(&code);
+ Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
+ frame_->Push(&result);
+
+ exit.Bind();
+}
+
+
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -3888,6 +3932,25 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
+// Generates the Math.pow method - currently just calls runtime.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ Load(args->at(0));
+ Load(args->at(1));
+ Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
+ frame_->Push(&res);
+}
+
+
+// Generates the Math.sqrt method - currently just calls runtime.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result res = frame_->CallRuntime(Runtime::kMath_sqrt, 1);
+ frame_->Push(&res);
+}
+
+
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -3955,21 +4018,12 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
frame_->SpillAll();
__ push(rsi);
- // Make sure the frame is aligned like the OS expects.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- __ movq(rbx, rsp); // Save in AMD-64 abi callee-saved register.
- __ and_(rsp, Immediate(-kFrameAlignment));
- }
+ static const int num_arguments = 0;
+ __ PrepareCallCFunction(num_arguments);
// Call V8::RandomPositiveSmi().
- __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
-
- // Restore stack pointer from callee-saved register.
- if (kFrameAlignment > 0) {
- __ movq(rsp, rbx);
- }
+ __ CallCFunction(ExternalReference::random_positive_smi_function(),
+ num_arguments);
__ pop(rsi);
Result result = allocator_->Allocate(rax);
@@ -4002,6 +4056,24 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+ Result answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+ Result answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
@@ -5217,7 +5289,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
}
// Get number type of left and right sub-expressions.
- NumberInfo::Type operands_type =
+ NumberInfo operands_type =
NumberInfo::Combine(left.number_info(), right.number_info());
Result answer;
@@ -5253,7 +5325,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// Set NumberInfo of result according to the operation performed.
// We rely on the fact that smis have a 32 bit payload on x64.
ASSERT(kSmiValueSize == 32);
- NumberInfo::Type result_type = NumberInfo::kUnknown;
+ NumberInfo result_type = NumberInfo::Unknown();
switch (op) {
case Token::COMMA:
result_type = right.number_info();
@@ -5267,32 +5339,32 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
case Token::BIT_XOR:
case Token::BIT_AND:
// Result is always a smi.
- result_type = NumberInfo::kSmi;
+ result_type = NumberInfo::Smi();
break;
case Token::SAR:
case Token::SHL:
// Result is always a smi.
- result_type = NumberInfo::kSmi;
+ result_type = NumberInfo::Smi();
break;
case Token::SHR:
// Result of x >>> y is always a smi if y >= 1, otherwise a number.
result_type = (right.is_constant() && right.handle()->IsSmi()
&& Smi::cast(*right.handle())->value() >= 1)
- ? NumberInfo::kSmi
- : NumberInfo::kNumber;
+ ? NumberInfo::Smi()
+ : NumberInfo::Number();
break;
case Token::ADD:
// Result could be a string or a number. Check types of inputs.
- result_type = NumberInfo::IsNumber(operands_type)
- ? NumberInfo::kNumber
- : NumberInfo::kUnknown;
+ result_type = operands_type.IsNumber()
+ ? NumberInfo::Number()
+ : NumberInfo::Unknown();
break;
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
// Result is always a number.
- result_type = NumberInfo::kNumber;
+ result_type = NumberInfo::Number();
break;
default:
UNREACHABLE();
@@ -6324,7 +6396,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ push(rsi);
__ push(rdx);
__ push(rcx); // Restore return address.
- __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
}
@@ -6366,7 +6438,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
@@ -6422,8 +6494,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ ret(3 * kPointerSize);
__ bind(&slow_case);
- ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
- __ TailCallRuntime(runtime, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
@@ -6782,10 +6853,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifndef V8_NATIVE_REGEXP
- __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_NATIVE_REGEXP
if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
}
@@ -7129,7 +7200,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif // V8_NATIVE_REGEXP
}
@@ -7540,7 +7611,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -7597,9 +7668,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(rbx); // Return address.
__ push(rdx);
__ push(rbx);
- Runtime::Function* f =
- Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
- __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
@@ -8088,8 +8157,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
__ push(rax);
// Do tail-call to runtime routine.
- Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
- __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
}
@@ -8273,7 +8341,7 @@ const char* GenericBinaryOpStub::GetName() {
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
use_sse3_ ? "SSE3" : "SSE2",
- NumberInfo::ToString(operands_type_));
+ operands_type_.ToString());
return name_;
}
@@ -8597,7 +8665,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: {
// rax: y
// rdx: x
- if (NumberInfo::IsNumber(operands_type_)) {
+ if (operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
@@ -8839,6 +8907,11 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
}
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ return Handle<Code>::null();
+}
+
+
int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value.
ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
@@ -8936,16 +9009,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx: length of first string
// rcx: length of second string
// rdx: second string
- // r8: instance type of first string if string check was performed above
- // r9: instance type of first string if string check was performed above
- Label string_add_flat_result;
+ // r8: map of first string if string check was performed above
+ // r9: map of second string if string check was performed above
+ Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
- // Look at the length of the result of adding the two strings.
- __ addl(rbx, rcx);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ cmpl(rbx, Immediate(2));
- __ j(equal, &string_add_runtime);
+
// If arguments where known to be strings, maps are not loaded to r8 and r9
// by the code above.
if (!string_check_) {
@@ -8955,6 +9023,35 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Get the instance types of the two strings as they will be needed soon.
__ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
__ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+
+ // Look at the length of the result of adding the two strings.
+ __ addl(rbx, rcx);
+ // Use the runtime system when adding two one character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmpl(rbx, Immediate(2));
+ __ j(not_equal, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
+ &string_add_runtime);
+
+ // Get the two characters forming the sub string.
+ __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string, make_flat_ascii_string;
+ GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, r14, r12, rdi, r15,
+ &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ __ Set(rbx, 2);
+ __ jmp(&make_flat_ascii_string);
+
+ __ bind(&longer_than_two);
// Check if resulting string will be flat.
__ cmpl(rbx, Immediate(String::kMinNonFlatLength));
__ j(below, &string_add_flat_result);
@@ -9021,6 +9118,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(zero, &non_ascii_string_add_flat_result);
__ testl(r9, Immediate(kAsciiStringTag));
__ j(zero, &string_add_runtime);
+
+ __ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
__ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
// rcx: result string
@@ -9087,7 +9186,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
}
@@ -9171,6 +9270,179 @@ void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&done);
}
+void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits as such strings has a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ movq(scratch, c1);
+ __ subq(scratch, Immediate(static_cast<int>('0')));
+ __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ movq(scratch, c2);
+ __ subq(scratch, Immediate(static_cast<int>('0')));
+ __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, Immediate(kBitsPerByte));
+ __ orl(chars, c2);
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ movq(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ SmiToInteger32(mask, mask);
+ __ decl(mask);
+
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string (32-bit int)
+ // symbol_table: symbol table
+ // mask: capacity mask (32-bit int)
+ // undefined: undefined value
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ movl(scratch, hash);
+ if (i > 0) {
+ __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ andl(scratch, mask);
+
+ // Load the entry from the symble table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ ASSERT_EQ(1, SymbolTable::kEntrySize);
+ __ movq(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmpq(candidate, undefined);
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ cmpl(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+ __ j(not_equal, &next_probe[i]);
+
+ // We use kScratchRegister as a temporary register in assumption that
+ // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
+ Register temp = kScratchRegister;
+
+ // Check that the candidate is a non-external ascii string.
+ __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe[i]);
+
+ // Check if the two characters match.
+ __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ andl(temp, Immediate(0x0000ffff));
+ __ cmpl(chars, temp);
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ if (!result.is(rax)) {
+ __ movq(rax, result);
+ }
+}
+
+
+void StringStubBase::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ movl(hash, character);
+ __ shll(hash, Immediate(10));
+ __ addl(hash, character);
+ // hash ^= hash >> 6;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(6));
+ __ xorl(hash, scratch);
+}
+
+
+void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ addl(hash, character);
+ // hash += hash << 10;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(10));
+ __ addl(hash, scratch);
+ // hash ^= hash >> 6;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(6));
+ __ xorl(hash, scratch);
+}
+
+
+void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(3));
+ __ addl(hash, scratch);
+ // hash ^= hash >> 11;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(11));
+ __ xorl(hash, scratch);
+ // hash += hash << 15;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(15));
+ __ addl(hash, scratch);
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ testl(hash, hash);
+ __ j(not_zero, &hash_not_zero);
+ __ movl(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -9197,25 +9469,55 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
+ Label result_longer_than_two;
__ movq(rcx, Operand(rsp, kToOffset));
__ movq(rdx, Operand(rsp, kFromOffset));
__ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
__ j(negative, &runtime);
- // Handle sub-strings of length 2 and less in the runtime system.
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
__ SmiToInteger32(rcx, rcx);
__ cmpl(rcx, Immediate(2));
- __ j(below_equal, &runtime);
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+
+ // Sub string of length 2 requested.
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length (value is 2)
+ // rdx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
+ __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx,
+ FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, rax, rdx, rdi, r14,
+ &make_two_character_string);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Setup registers for allocating the two character string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ Set(rcx, 2);
+
+ __ bind(&result_longer_than_two);
// rax: string
// rbx: instance type
// rcx: result string length
// Check for flat ascii string
Label non_ascii_flat;
- __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
- __ j(not_equal, &non_ascii_flat);
+ __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
// Allocate the result.
__ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
@@ -9281,7 +9583,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
@@ -9401,7 +9703,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
#undef __
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 309e2948f4..8f5fdc1722 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -547,6 +547,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateCharFromCode(ZoneList<Expression*>* args);
+
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -572,7 +575,17 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
- // Simple condition analysis.
+ // Fast support for Math.pow().
+ void GenerateMathPow(ZoneList<Expression*>* args);
+
+ // Fast call to math functions.
+ void GenerateMathSin(ZoneList<Expression*>* args);
+ void GenerateMathCos(ZoneList<Expression*>* args);
+
+ // Fast case for sqrt
+ void GenerateMathSqrt(ZoneList<Expression*>* args);
+
+// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
ALWAYS_FALSE,
@@ -651,7 +664,7 @@ class GenericBinaryOpStub: public CodeStub {
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags,
- NumberInfo::Type operands_type = NumberInfo::kUnknown)
+ NumberInfo operands_type = NumberInfo::Unknown())
: op_(op),
mode_(mode),
flags_(flags),
@@ -683,7 +696,7 @@ class GenericBinaryOpStub: public CodeStub {
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
char* name_;
- NumberInfo::Type operands_type_;
+ NumberInfo operands_type_;
const char* GetName();
@@ -697,7 +710,7 @@ class GenericBinaryOpStub: public CodeStub {
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_),
- NumberInfo::ToString(operands_type_));
+ operands_type_.ToString());
}
#endif
@@ -708,7 +721,7 @@ class GenericBinaryOpStub: public CodeStub {
class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+ class NumberInfoBits: public BitField<int, 13, 3> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
@@ -719,7 +732,7 @@ class GenericBinaryOpStub: public CodeStub {
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
- | NumberInfoBits::encode(operands_type_);
+ | NumberInfoBits::encode(operands_type_.ThreeBitRepresentation());
}
void Generate(MacroAssembler* masm);
@@ -763,6 +776,33 @@ class StringStubBase: public CodeStub {
Register src, // Must be rsi.
Register count, // Must be rcx.
bool ascii);
+
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found the code falls through with the string in register rax.
+ void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found);
+
+ // Generate string hash.
+ void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
};
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
index af2a1ff821..5e76901305 100644
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ b/deps/v8/src/x64/fast-codegen-x64.cc
@@ -189,6 +189,7 @@ void FastCodeGenerator::EmitBitOr() {
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
+ Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 669e1750c1..bfa1a44f9d 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -56,6 +56,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) {
__ push(rbp); // Caller's frame pointer.
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 0e93637f32..e05f68dcab 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -241,7 +241,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -258,7 +259,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -608,7 +609,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(rdx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(
+ __ TailCallExternalReference(ExternalReference(
IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
__ bind(&slow);
@@ -631,7 +632,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(rcx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -650,7 +652,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
__ push(rcx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
@@ -1223,7 +1225,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -1385,7 +1388,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -1449,7 +1453,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(value);
__ push(scratch); // return address
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ __ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 90a9c75d9b..dca27806e3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -396,9 +396,9 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
- int num_arguments,
- int result_size) {
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : argument num_arguments - 1
@@ -411,12 +411,19 @@ void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
// should remove this need and make the runtime routine entry code
// smarter.
movq(rax, Immediate(num_arguments));
- JumpToRuntime(ext, result_size);
+ JumpToExternalReference(ext, result_size);
}
-void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
- int result_size) {
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
+ int result_size) {
// Set the entry point and jump to the C entry runtime stub.
movq(rbx, ext);
CEntryStub ces(result_size);
@@ -1393,6 +1400,50 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
}
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ Label *failure) {
+ if (!scratch.is(instance_type)) {
+ movl(scratch, instance_type);
+ }
+
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+ andl(scratch, Immediate(kFlatAsciiStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ j(not_equal, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail) {
+ // Load instance type for both strings.
+ movq(scratch1, first_object_instance_type);
+ movq(scratch2, second_object_instance_type);
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 2673086dc5..bbb6e21832 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -426,6 +426,20 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* on_not_both_flat_ascii);
+ // Check whether the instance type represents a flat ascii string. Jump to the
+ // label if not. If the instance type can be scratched specify same register
+ // for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+ Register scratch,
+ Label *on_not_flat_ascii_string);
+
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail);
+
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -645,7 +659,6 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- // Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@@ -656,14 +669,19 @@ class MacroAssembler: public Assembler {
int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToRuntime, but also takes care of passing the number
- // of arguments.
- void TailCallRuntime(const ExternalReference& ext,
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Jump to a runtime routine.
- void JumpToRuntime(const ExternalReference& ext, int result_size);
+ void JumpToExternalReference(const ExternalReference& ext, int result_size);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 026301b2af..6e7d9c9e07 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -335,7 +335,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
__ push(backtrack_stackpointer());
- int num_arguments = 3;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
// Put arguments into parameter registers. Parameters are
@@ -849,7 +849,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#endif
// Call GrowStack(backtrack_stackpointer())
- int num_arguments = 2;
+ static const int num_arguments = 2;
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx.
@@ -1029,7 +1029,7 @@ void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// This function call preserves no register values. Caller should
// store anything volatile in a C call or overwritten by this function.
- int num_arguments = 3;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 9c8b4f75aa..5d73b5f005 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -236,7 +236,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ Push(Handle<Map>(transition));
__ push(rax);
__ push(scratch);
- __ TailCallRuntime(
+ __ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
return;
}
@@ -526,7 +526,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(ref, 5, 1);
+ __ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@@ -548,7 +548,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(ref, 5, 1);
+ __ TailCallExternalReference(ref, 5, 1);
}
private:
@@ -1360,7 +1360,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
- __ TailCallRuntime(store_callback_property, 4, 1);
+ __ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1438,7 +1438,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallRuntime(store_ic_property, 3, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1637,7 +1637,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5, 1);
+ __ TailCallExternalReference(load_callback_property, 5, 1);
return true;
}
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index e322c614df..79be20b1bd 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -37,23 +37,6 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
- for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
- }
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- register_locations_[i] = kIllegalIndex;
- }
-}
-
-
void VirtualFrame::Enter() {
// Registers live on entry to a JS frame:
// rsp: stack pointer, points to return address from this function.
@@ -194,7 +177,7 @@ void VirtualFrame::EmitPop(const Operand& operand) {
}
-void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -202,7 +185,7 @@ void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
}
-void VirtualFrame::EmitPush(const Operand& operand, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(const Operand& operand, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -210,7 +193,7 @@ void VirtualFrame::EmitPush(const Operand& operand, NumberInfo::Type info) {
}
-void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -220,7 +203,7 @@ void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
void VirtualFrame::EmitPush(Smi* smi_value) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(NumberInfo::kSmi));
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::Smi()));
stack_pointer_++;
__ Push(smi_value);
}
@@ -228,11 +211,11 @@ void VirtualFrame::EmitPush(Smi* smi_value) {
void VirtualFrame::EmitPush(Handle<Object> value) {
ASSERT(stack_pointer_ == element_count() - 1);
- NumberInfo::Type info = NumberInfo::kUnknown;
+ NumberInfo info = NumberInfo::Unknown();
if (value->IsSmi()) {
- info = NumberInfo::kSmi;
+ info = NumberInfo::Smi();
} else if (value->IsHeapNumber()) {
- info = NumberInfo::kHeapNumber;
+ info = NumberInfo::HeapNumber();
}
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -240,7 +223,7 @@ void VirtualFrame::EmitPush(Handle<Object> value) {
}
-void VirtualFrame::EmitPush(Heap::RootListIndex index, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Heap::RootListIndex index, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@@ -497,7 +480,7 @@ void VirtualFrame::MakeMergable() {
if (element.is_constant() || element.is_copy()) {
if (element.is_synced()) {
// Just spill.
- elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
+ elements_[i] = FrameElement::MemoryElement(NumberInfo::Unknown());
} else {
// Allocate to a register.
FrameElement backing_element; // Invalid if not a copy.
@@ -509,7 +492,7 @@ void VirtualFrame::MakeMergable() {
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED,
- NumberInfo::kUnknown);
+ NumberInfo::Unknown());
Use(fresh.reg(), i);
// Emit a move.
@@ -538,7 +521,7 @@ void VirtualFrame::MakeMergable() {
// The copy flag is not relied on before the end of this loop,
// including when registers are spilled.
elements_[i].clear_copied();
- elements_[i].set_number_info(NumberInfo::kUnknown);
+ elements_[i].set_number_info(NumberInfo::Unknown());
}
}
}
@@ -745,7 +728,7 @@ Result VirtualFrame::Pop() {
ASSERT(element.is_valid());
// Get number type information of the result.
- NumberInfo::Type info;
+ NumberInfo info;
if (!element.is_copy()) {
info = element.number_info();
} else {
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index d4df4840e5..ddd606bf4e 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -73,7 +73,7 @@ class VirtualFrame : public ZoneObject {
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- VirtualFrame();
+ inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
@@ -83,7 +83,7 @@ class VirtualFrame : public ZoneObject {
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index,
- NumberInfo::Type info = NumberInfo::kUninitialized);
+ NumberInfo info = NumberInfo::Uninitialized());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -383,19 +383,19 @@ class VirtualFrame : public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
void EmitPush(const Operand& operand,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
void EmitPush(Heap::RootListIndex index,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
void EmitPush(Immediate immediate,
- NumberInfo::Type info = NumberInfo::kUnknown);
+ NumberInfo info = NumberInfo::Unknown());
void EmitPush(Smi* value);
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);
// Push an element on the virtual frame.
- inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
+ inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index 121ba19b12..5893a2f80e 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -68,227 +68,12 @@ void Zone::adjust_segment_bytes_allocated(int delta) {
}
-template <typename C>
-bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
- if (is_empty()) {
- // If the tree is empty, insert the new node.
- root_ = new Node(key, C::kNoValue);
- } else {
- // Splay on the key to move the last node on the search path
- // for the key to the root of the tree.
- Splay(key);
- // Ignore repeated insertions with the same key.
- int cmp = C::Compare(key, root_->key_);
- if (cmp == 0) {
- locator->bind(root_);
- return false;
- }
- // Insert the new node.
- Node* node = new Node(key, C::kNoValue);
- if (cmp > 0) {
- node->left_ = root_;
- node->right_ = root_->right_;
- root_->right_ = NULL;
- } else {
- node->right_ = root_;
- node->left_ = root_->left_;
- root_->left_ = NULL;
- }
- root_ = node;
- }
- locator->bind(root_);
- return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
- if (is_empty())
- return false;
- Splay(key);
- if (C::Compare(key, root_->key_) == 0) {
- locator->bind(root_);
- return true;
- } else {
- return false;
- }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- int cmp = C::Compare(root_->key_, key);
- if (cmp <= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->left_;
- bool result = FindGreatest(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the least node in
- // the right subtree.
- int cmp = C::Compare(root_->key_, key);
- if (cmp >= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->right_;
- bool result = FindLeast(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->right_ != NULL)
- current = current->right_;
- locator->bind(current);
- return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->left_ != NULL)
- current = current->left_;
- locator->bind(current);
- return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Remove(const Key& key) {
- // Bail if the tree is empty
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key to the top.
- Splay(key);
- // Bail if the key is not in the tree
- if (C::Compare(key, root_->key_) != 0)
- return false;
- if (root_->left_ == NULL) {
- // No left child, so the new tree is just the right child.
- root_ = root_->right_;
- } else {
- // Left child exists.
- Node* right = root_->right_;
- // Make the original left child the new root.
- root_ = root_->left_;
- // Splay to make sure that the new root has an empty right child.
- Splay(key);
- // Insert the original right child as the right child of the new
- // root.
- root_->right_ = right;
- }
- return true;
-}
-
-
-template <typename C>
-void ZoneSplayTree<C>::Splay(const Key& key) {
- if (is_empty())
- return;
- Node dummy_node(C::kNoKey, C::kNoValue);
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- Node* dummy = &dummy_node;
- Node* left = dummy;
- Node* right = dummy;
- Node* current = root_;
- while (true) {
- int cmp = C::Compare(key, current->key_);
- if (cmp < 0) {
- if (current->left_ == NULL)
- break;
- if (C::Compare(key, current->left_->key_) < 0) {
- // Rotate right.
- Node* temp = current->left_;
- current->left_ = temp->right_;
- temp->right_ = current;
- current = temp;
- if (current->left_ == NULL)
- break;
- }
- // Link right.
- right->left_ = current;
- right = current;
- current = current->left_;
- } else if (cmp > 0) {
- if (current->right_ == NULL)
- break;
- if (C::Compare(key, current->right_->key_) > 0) {
- // Rotate left.
- Node* temp = current->right_;
- current->right_ = temp->left_;
- temp->left_ = current;
- current = temp;
- if (current->right_ == NULL)
- break;
- }
- // Link left.
- left->right_ = current;
- left = current;
- current = current->right_;
- } else {
- break;
- }
- }
- // Assemble.
- left->right_ = current->left_;
- right->left_ = current->right_;
- current->left_ = dummy->right_;
- current->right_ = dummy->left_;
- root_ = current;
-}
-
-
-template <typename Config> template <class Callback>
-void ZoneSplayTree<Config>::ForEach(Callback* callback) {
- // Pre-allocate some space for tiny trees.
- ZoneList<Node*> nodes_to_visit(10);
- nodes_to_visit.Add(root_);
- int pos = 0;
- while (pos < nodes_to_visit.length()) {
- Node* node = nodes_to_visit[pos++];
- if (node == NULL) continue;
- callback->Call(node->key(), node->value());
- nodes_to_visit.Add(node->left());
- nodes_to_visit.Add(node->right());
- }
+template <typename Config>
+ZoneSplayTree<Config>::~ZoneSplayTree() {
+ // Reset the root to avoid unneeded iteration over all tree nodes
+ // in the destructor. For a zone-allocated tree, nodes will be
+ // freed by the Zone.
+ SplayTree<Config, ZoneListAllocationPolicy>::ResetRoot();
}
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 33fe5571f1..01df4504fe 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "zone-inl.h"
+#include "splay-tree-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index 0d006dddf2..3397356223 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -205,98 +205,14 @@ class ZoneScope BASE_EMBEDDED {
// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree:
-//
-// typedef Key: the key type
-// typedef Value: the value type
-// static const kNoKey: the dummy key used when no key is set
-// static const kNoValue: the dummy value used to initialize nodes
-// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
+// different configurations of a concrete splay tree (see splay-tree.h).
+// The tree itself and all its elements are allocated in the Zone.
template <typename Config>
-class ZoneSplayTree : public ZoneObject {
+class ZoneSplayTree: public SplayTree<Config, ZoneListAllocationPolicy> {
public:
- typedef typename Config::Key Key;
- typedef typename Config::Value Value;
-
- class Locator;
-
- ZoneSplayTree() : root_(NULL) { }
-
- // Inserts the given key in this tree with the given value. Returns
- // true if a node was inserted, otherwise false. If found the locator
- // is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator);
-
- // Looks up the key in this tree and returns true if it was found,
- // otherwise false. If the node is found the locator is enabled and
- // provides access to the mapping for the key.
- bool Find(const Key& key, Locator* locator);
-
- // Finds the mapping with the greatest key less than or equal to the
- // given key.
- bool FindGreatestLessThan(const Key& key, Locator* locator);
-
- // Find the mapping with the greatest key in this tree.
- bool FindGreatest(Locator* locator);
-
- // Finds the mapping with the least key greater than or equal to the
- // given key.
- bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
- // Find the mapping with the least key in this tree.
- bool FindLeast(Locator* locator);
-
- // Remove the node with the given key from the tree.
- bool Remove(const Key& key);
-
- bool is_empty() { return root_ == NULL; }
-
- // Perform the splay operation for the given key. Moves the node with
- // the given key to the top of the tree. If no node has the given
- // key, the last node on the search path is moved to the top of the
- // tree.
- void Splay(const Key& key);
-
- class Node : public ZoneObject {
- public:
- Node(const Key& key, const Value& value)
- : key_(key),
- value_(value),
- left_(NULL),
- right_(NULL) { }
- Key key() { return key_; }
- Value value() { return value_; }
- Node* left() { return left_; }
- Node* right() { return right_; }
- private:
- friend class ZoneSplayTree;
- friend class Locator;
- Key key_;
- Value value_;
- Node* left_;
- Node* right_;
- };
-
- // A locator provides access to a node in the tree without actually
- // exposing the node.
- class Locator {
- public:
- explicit Locator(Node* node) : node_(node) { }
- Locator() : node_(NULL) { }
- const Key& key() { return node_->key_; }
- Value& value() { return node_->value_; }
- void set_value(const Value& value) { node_->value_ = value; }
- inline void bind(Node* node) { node_ = node; }
- private:
- Node* node_;
- };
-
- template <class Callback>
- void ForEach(Callback* callback);
-
- private:
- Node* root_;
+ ZoneSplayTree()
+ : SplayTree<Config, ZoneListAllocationPolicy>() {}
+ ~ZoneSplayTree();
};
diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript
index acd567e574..7335b7a5c3 100644
--- a/deps/v8/test/cctest/SConscript
+++ b/deps/v8/test/cctest/SConscript
@@ -40,6 +40,7 @@ SOURCES = {
'test-ast.cc',
'test-compiler.cc',
'test-conversions.cc',
+ 'test-dataflow.cc',
'test-debug.cc',
'test-decls.cc',
'test-flags.cc',
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index ecbafa0ac1..eefe71ca9e 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -27,6 +27,8 @@
prefix cctest
+test-api/Bug*: FAIL
+
# BUG(281): This test fails on some Linuxes.
test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 10a92c38ea..e996a07303 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -264,6 +264,25 @@ THREADED_TEST(Access) {
}
+THREADED_TEST(AccessElement) {
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::Object> obj = v8::Object::New();
+ Local<Value> before = obj->Get(1);
+ CHECK(before->IsUndefined());
+ Local<String> bar_str = v8_str("bar");
+ obj->Set(1, bar_str);
+ Local<Value> after = obj->Get(1);
+ CHECK(!after->IsUndefined());
+ CHECK(after->IsString());
+ CHECK_EQ(bar_str, after);
+
+ Local<v8::Array> value = CompileRun("[\"a\", \"b\"]").As<v8::Array>();
+ CHECK_EQ(v8_str("a"), value->Get(0));
+ CHECK_EQ(v8_str("b"), value->Get(1));
+}
+
+
THREADED_TEST(Script) {
v8::HandleScope scope;
LocalContext env;
@@ -1254,7 +1273,7 @@ static v8::Handle<Value> CallFunctionRecursivelyCall(
args.This()->Set(v8_str("depth"), v8::Integer::New(depth + 1));
v8::Handle<Value> function =
args.This()->Get(v8_str("callFunctionRecursively"));
- return v8::Handle<Function>::Cast(function)->Call(args.This(), 0, NULL);
+ return function.As<Function>()->Call(args.This(), 0, NULL);
}
@@ -1334,6 +1353,20 @@ THREADED_TEST(InternalFields) {
}
+THREADED_TEST(GlobalObjectInternalFields) {
+ v8::HandleScope scope;
+ Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->SetInternalFieldCount(1);
+ LocalContext env(NULL, global_template);
+ v8::Handle<v8::Object> global_proxy = env->Global();
+ v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
+ CHECK_EQ(1, global->InternalFieldCount());
+ CHECK(global->GetInternalField(0)->IsUndefined());
+ global->SetInternalField(0, v8_num(17));
+ CHECK_EQ(17, global->GetInternalField(0)->Int32Value());
+}
+
+
THREADED_TEST(InternalFieldsNativePointers) {
v8::HandleScope scope;
LocalContext env;
@@ -1514,7 +1547,7 @@ THREADED_TEST(External) {
LocalContext env;
env->Global()->Set(v8_str("ext"), ext);
Local<Value> reext_obj = Script::Compile(v8_str("this.ext"))->Run();
- v8::Handle<v8::External> reext = v8::Handle<v8::External>::Cast(reext_obj);
+ v8::Handle<v8::External> reext = reext_obj.As<v8::External>();
int* ptr = static_cast<int*>(reext->Value());
CHECK_EQ(x, 3);
*ptr = 10;
@@ -1646,22 +1679,22 @@ THREADED_TEST(Array) {
LocalContext context;
Local<v8::Array> array = v8::Array::New();
CHECK_EQ(0, array->Length());
- CHECK(array->Get(v8::Integer::New(0))->IsUndefined());
+ CHECK(array->Get(0)->IsUndefined());
CHECK(!array->Has(0));
- CHECK(array->Get(v8::Integer::New(100))->IsUndefined());
+ CHECK(array->Get(100)->IsUndefined());
CHECK(!array->Has(100));
- array->Set(v8::Integer::New(2), v8_num(7));
+ array->Set(2, v8_num(7));
CHECK_EQ(3, array->Length());
CHECK(!array->Has(0));
CHECK(!array->Has(1));
CHECK(array->Has(2));
- CHECK_EQ(7, array->Get(v8::Integer::New(2))->Int32Value());
+ CHECK_EQ(7, array->Get(2)->Int32Value());
Local<Value> obj = Script::Compile(v8_str("[1, 2, 3]"))->Run();
- Local<v8::Array> arr = Local<v8::Array>::Cast(obj);
+ Local<v8::Array> arr = obj.As<v8::Array>();
CHECK_EQ(3, arr->Length());
- CHECK_EQ(1, arr->Get(v8::Integer::New(0))->Int32Value());
- CHECK_EQ(2, arr->Get(v8::Integer::New(1))->Int32Value());
- CHECK_EQ(3, arr->Get(v8::Integer::New(2))->Int32Value());
+ CHECK_EQ(1, arr->Get(0)->Int32Value());
+ CHECK_EQ(2, arr->Get(1)->Int32Value());
+ CHECK_EQ(3, arr->Get(2)->Int32Value());
}
@@ -1670,7 +1703,7 @@ v8::Handle<Value> HandleF(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
Local<v8::Array> result = v8::Array::New(args.Length());
for (int i = 0; i < args.Length(); i++)
- result->Set(v8::Integer::New(i), args[i]);
+ result->Set(i, args[i]);
return scope.Close(result);
}
@@ -1682,39 +1715,34 @@ THREADED_TEST(Vector) {
LocalContext context(0, global);
const char* fun = "f()";
- Local<v8::Array> a0 =
- Local<v8::Array>::Cast(Script::Compile(String::New(fun))->Run());
+ Local<v8::Array> a0 = CompileRun(fun).As<v8::Array>();
CHECK_EQ(0, a0->Length());
const char* fun2 = "f(11)";
- Local<v8::Array> a1 =
- Local<v8::Array>::Cast(Script::Compile(String::New(fun2))->Run());
+ Local<v8::Array> a1 = CompileRun(fun2).As<v8::Array>();
CHECK_EQ(1, a1->Length());
- CHECK_EQ(11, a1->Get(v8::Integer::New(0))->Int32Value());
+ CHECK_EQ(11, a1->Get(0)->Int32Value());
const char* fun3 = "f(12, 13)";
- Local<v8::Array> a2 =
- Local<v8::Array>::Cast(Script::Compile(String::New(fun3))->Run());
+ Local<v8::Array> a2 = CompileRun(fun3).As<v8::Array>();
CHECK_EQ(2, a2->Length());
- CHECK_EQ(12, a2->Get(v8::Integer::New(0))->Int32Value());
- CHECK_EQ(13, a2->Get(v8::Integer::New(1))->Int32Value());
+ CHECK_EQ(12, a2->Get(0)->Int32Value());
+ CHECK_EQ(13, a2->Get(1)->Int32Value());
const char* fun4 = "f(14, 15, 16)";
- Local<v8::Array> a3 =
- Local<v8::Array>::Cast(Script::Compile(String::New(fun4))->Run());
+ Local<v8::Array> a3 = CompileRun(fun4).As<v8::Array>();
CHECK_EQ(3, a3->Length());
- CHECK_EQ(14, a3->Get(v8::Integer::New(0))->Int32Value());
- CHECK_EQ(15, a3->Get(v8::Integer::New(1))->Int32Value());
- CHECK_EQ(16, a3->Get(v8::Integer::New(2))->Int32Value());
+ CHECK_EQ(14, a3->Get(0)->Int32Value());
+ CHECK_EQ(15, a3->Get(1)->Int32Value());
+ CHECK_EQ(16, a3->Get(2)->Int32Value());
const char* fun5 = "f(17, 18, 19, 20)";
- Local<v8::Array> a4 =
- Local<v8::Array>::Cast(Script::Compile(String::New(fun5))->Run());
+ Local<v8::Array> a4 = CompileRun(fun5).As<v8::Array>();
CHECK_EQ(4, a4->Length());
- CHECK_EQ(17, a4->Get(v8::Integer::New(0))->Int32Value());
- CHECK_EQ(18, a4->Get(v8::Integer::New(1))->Int32Value());
- CHECK_EQ(19, a4->Get(v8::Integer::New(2))->Int32Value());
- CHECK_EQ(20, a4->Get(v8::Integer::New(3))->Int32Value());
+ CHECK_EQ(17, a4->Get(0)->Int32Value());
+ CHECK_EQ(18, a4->Get(1)->Int32Value());
+ CHECK_EQ(19, a4->Get(2)->Int32Value());
+ CHECK_EQ(20, a4->Get(3)->Int32Value());
}
@@ -1932,6 +1960,95 @@ static void CheckUncle(v8::TryCatch* try_catch) {
}
+THREADED_TEST(ConversionNumber) {
+ v8::HandleScope scope;
+ LocalContext env;
+ // Very large number.
+ CompileRun("var obj = Math.pow(2,32) * 1237;");
+ Local<Value> obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(5312874545152.0, obj->ToNumber()->Value());
+ CHECK_EQ(0, obj->ToInt32()->Value());
+ CHECK(0u == obj->ToUint32()->Value()); // NOLINT - no CHECK_EQ for unsigned.
+ // Large number.
+ CompileRun("var obj = -1234567890123;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(-1234567890123.0, obj->ToNumber()->Value());
+ CHECK_EQ(-1912276171, obj->ToInt32()->Value());
+ CHECK(2382691125u == obj->ToUint32()->Value()); // NOLINT
+ // Small positive integer.
+ CompileRun("var obj = 42;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(42.0, obj->ToNumber()->Value());
+ CHECK_EQ(42, obj->ToInt32()->Value());
+ CHECK(42u == obj->ToUint32()->Value()); // NOLINT
+ // Negative integer.
+ CompileRun("var obj = -37;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(-37.0, obj->ToNumber()->Value());
+ CHECK_EQ(-37, obj->ToInt32()->Value());
+ CHECK(4294967259u == obj->ToUint32()->Value()); // NOLINT
+ // Positive non-int32 integer.
+ CompileRun("var obj = 0x81234567;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(2166572391.0, obj->ToNumber()->Value());
+ CHECK_EQ(-2128394905, obj->ToInt32()->Value());
+ CHECK(2166572391u == obj->ToUint32()->Value()); // NOLINT
+ // Fraction.
+ CompileRun("var obj = 42.3;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(42.3, obj->ToNumber()->Value());
+ CHECK_EQ(42, obj->ToInt32()->Value());
+ CHECK(42u == obj->ToUint32()->Value()); // NOLINT
+ // Large negative fraction.
+ CompileRun("var obj = -5726623061.75;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK_EQ(-5726623061.75, obj->ToNumber()->Value());
+ CHECK_EQ(-1431655765, obj->ToInt32()->Value());
+ CHECK(2863311531u == obj->ToUint32()->Value()); // NOLINT
+}
+
+
+THREADED_TEST(isNumberType) {
+ v8::HandleScope scope;
+ LocalContext env;
+ // Very large number.
+ CompileRun("var obj = Math.pow(2,32) * 1237;");
+ Local<Value> obj = env->Global()->Get(v8_str("obj"));
+ CHECK(!obj->IsInt32());
+ CHECK(!obj->IsUint32());
+ // Large negative number.
+ CompileRun("var obj = -1234567890123;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK(!obj->IsInt32());
+ CHECK(!obj->IsUint32());
+ // Small positive integer.
+ CompileRun("var obj = 42;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK(obj->IsInt32());
+ CHECK(obj->IsUint32());
+ // Negative integer.
+ CompileRun("var obj = -37;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK(obj->IsInt32());
+ CHECK(!obj->IsUint32());
+ // Positive non-int32 integer.
+ CompileRun("var obj = 0x81234567;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK(!obj->IsInt32());
+ CHECK(obj->IsUint32());
+ // Fraction.
+ CompileRun("var obj = 42.3;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK(!obj->IsInt32());
+ CHECK(!obj->IsUint32());
+ // Large negative fraction.
+ CompileRun("var obj = -5726623061.75;");
+ obj = env->Global()->Get(v8_str("obj"));
+ CHECK(!obj->IsInt32());
+ CHECK(!obj->IsUint32());
+}
+
+
THREADED_TEST(ConversionException) {
v8::HandleScope scope;
LocalContext env;
@@ -2130,8 +2247,7 @@ v8::Handle<Value> CThrowCountDown(const v8::Arguments& args) {
args[3] };
if (count % cInterval == 0) {
v8::TryCatch try_catch;
- Local<Value> result =
- v8::Handle<Function>::Cast(fun)->Call(global, 4, argv);
+ Local<Value> result = fun.As<Function>()->Call(global, 4, argv);
int expected = args[3]->Int32Value();
if (try_catch.HasCaught()) {
CHECK_EQ(expected, count);
@@ -2142,7 +2258,7 @@ v8::Handle<Value> CThrowCountDown(const v8::Arguments& args) {
}
return result;
} else {
- return v8::Handle<Function>::Cast(fun)->Call(global, 4, argv);
+ return fun.As<Function>()->Call(global, 4, argv);
}
}
}
@@ -2532,7 +2648,7 @@ static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property,
const AccessorInfo& info) {
// Set x on the prototype object and do not handle the get request.
v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
- v8::Handle<v8::Object>::Cast(proto)->Set(v8_str("x"), v8::Integer::New(23));
+ proto.As<v8::Object>()->Set(v8_str("x"), v8::Integer::New(23));
return v8::Handle<Value>();
}
@@ -2868,22 +2984,22 @@ THREADED_TEST(FunctionPrototypeAcrossContexts) {
v8::Handle<v8::Object> global0 =
env0->Global();
v8::Handle<v8::Object> object0 =
- v8::Handle<v8::Object>::Cast(global0->Get(v8_str("Object")));
+ global0->Get(v8_str("Object")).As<v8::Object>();
v8::Handle<v8::Object> tostring0 =
- v8::Handle<v8::Object>::Cast(object0->Get(v8_str("toString")));
+ object0->Get(v8_str("toString")).As<v8::Object>();
v8::Handle<v8::Object> proto0 =
- v8::Handle<v8::Object>::Cast(tostring0->Get(v8_str("__proto__")));
+ tostring0->Get(v8_str("__proto__")).As<v8::Object>();
proto0->Set(v8_str("custom"), v8_num(1234));
LocalContext env1;
v8::Handle<v8::Object> global1 =
env1->Global();
v8::Handle<v8::Object> object1 =
- v8::Handle<v8::Object>::Cast(global1->Get(v8_str("Object")));
+ global1->Get(v8_str("Object")).As<v8::Object>();
v8::Handle<v8::Object> tostring1 =
- v8::Handle<v8::Object>::Cast(object1->Get(v8_str("toString")));
+ object1->Get(v8_str("toString")).As<v8::Object>();
v8::Handle<v8::Object> proto1 =
- v8::Handle<v8::Object>::Cast(tostring1->Get(v8_str("__proto__")));
+ tostring1->Get(v8_str("__proto__")).As<v8::Object>();
CHECK(!proto1->Has(v8_str("custom")));
}
@@ -3505,7 +3621,7 @@ THREADED_TEST(Arguments) {
v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
global->Set(v8_str("f"), v8::FunctionTemplate::New(ArgumentsTestCallback));
LocalContext context(NULL, global);
- args_fun = v8::Handle<Function>::Cast(context->Global()->Get(v8_str("f")));
+ args_fun = context->Global()->Get(v8_str("f")).As<Function>();
v8_compile("f(1, 2, 3)")->Run();
}
@@ -3843,21 +3959,20 @@ THREADED_TEST(ErrorConstruction) {
v8::Handle<String> message = v8_str("message");
v8::Handle<Value> range_error = v8::Exception::RangeError(foo);
CHECK(range_error->IsObject());
- v8::Handle<v8::Object> range_obj(v8::Handle<v8::Object>::Cast(range_error));
- CHECK(v8::Handle<v8::Object>::Cast(range_error)->Get(message)->Equals(foo));
+ v8::Handle<v8::Object> range_obj = range_error.As<v8::Object>();
+ CHECK(range_error.As<v8::Object>()->Get(message)->Equals(foo));
v8::Handle<Value> reference_error = v8::Exception::ReferenceError(foo);
CHECK(reference_error->IsObject());
- CHECK(
- v8::Handle<v8::Object>::Cast(reference_error)->Get(message)->Equals(foo));
+ CHECK(reference_error.As<v8::Object>()->Get(message)->Equals(foo));
v8::Handle<Value> syntax_error = v8::Exception::SyntaxError(foo);
CHECK(syntax_error->IsObject());
- CHECK(v8::Handle<v8::Object>::Cast(syntax_error)->Get(message)->Equals(foo));
+ CHECK(syntax_error.As<v8::Object>()->Get(message)->Equals(foo));
v8::Handle<Value> type_error = v8::Exception::TypeError(foo);
CHECK(type_error->IsObject());
- CHECK(v8::Handle<v8::Object>::Cast(type_error)->Get(message)->Equals(foo));
+ CHECK(type_error.As<v8::Object>()->Get(message)->Equals(foo));
v8::Handle<Value> error = v8::Exception::Error(foo);
CHECK(error->IsObject());
- CHECK(v8::Handle<v8::Object>::Cast(error)->Get(message)->Equals(foo));
+ CHECK(error.As<v8::Object>()->Get(message)->Equals(foo));
}
@@ -4780,13 +4895,13 @@ static bool NamedAccessFlatten(Local<v8::Object> global,
CHECK(name->IsString());
memset(buf, 0x1, sizeof(buf));
- len = Local<String>::Cast(name)->WriteAscii(buf);
+ len = name.As<String>()->WriteAscii(buf);
CHECK_EQ(4, len);
uint16_t buf2[100];
memset(buf, 0x1, sizeof(buf));
- len = Local<String>::Cast(name)->Write(buf2);
+ len = name.As<String>()->Write(buf2);
CHECK_EQ(4, len);
return true;
@@ -5135,7 +5250,7 @@ THREADED_TEST(HiddenPrototype) {
// object.
Local<Value> proto = o0->Get(v8_str("__proto__"));
CHECK(proto->IsObject());
- CHECK(Local<v8::Object>::Cast(proto)->Get(v8_str("z"))->IsUndefined());
+ CHECK(proto.As<v8::Object>()->Get(v8_str("z"))->IsUndefined());
}
@@ -5179,20 +5294,20 @@ THREADED_TEST(SetPrototype) {
// object.
Local<Value> proto = o0->Get(v8_str("__proto__"));
CHECK(proto->IsObject());
- CHECK_EQ(v8::Handle<v8::Object>::Cast(proto), o3);
+ CHECK_EQ(proto.As<v8::Object>(), o3);
// However, Object::GetPrototype ignores hidden prototype.
Local<Value> proto0 = o0->GetPrototype();
CHECK(proto0->IsObject());
- CHECK_EQ(v8::Handle<v8::Object>::Cast(proto0), o1);
+ CHECK_EQ(proto0.As<v8::Object>(), o1);
Local<Value> proto1 = o1->GetPrototype();
CHECK(proto1->IsObject());
- CHECK_EQ(v8::Handle<v8::Object>::Cast(proto1), o2);
+ CHECK_EQ(proto1.As<v8::Object>(), o2);
Local<Value> proto2 = o2->GetPrototype();
CHECK(proto2->IsObject());
- CHECK_EQ(v8::Handle<v8::Object>::Cast(proto2), o3);
+ CHECK_EQ(proto2.As<v8::Object>(), o3);
}
@@ -6904,7 +7019,7 @@ THREADED_TEST(ObjectProtoToString) {
// Check ordinary object
Local<Value> object = v8_compile("new Object()")->Run();
- value = Local<v8::Object>::Cast(object)->ObjectProtoToString();
+ value = object.As<v8::Object>()->ObjectProtoToString();
CHECK(value->IsString() && value->Equals(v8_str("[object Object]")));
}
@@ -7524,12 +7639,12 @@ THREADED_TEST(DateAccess) {
LocalContext context;
v8::Handle<v8::Value> date = v8::Date::New(1224744689038.0);
CHECK(date->IsDate());
- CHECK_EQ(1224744689038.0, v8::Handle<v8::Date>::Cast(date)->NumberValue());
+ CHECK_EQ(1224744689038.0, date.As<v8::Date>()->NumberValue());
}
void CheckProperties(v8::Handle<v8::Value> val, int elmc, const char* elmv[]) {
- v8::Handle<v8::Object> obj = v8::Handle<v8::Object>::Cast(val);
+ v8::Handle<v8::Object> obj = val.As<v8::Object>();
v8::Handle<v8::Array> props = obj->GetPropertyNames();
CHECK_EQ(elmc, props->Length());
for (int i = 0; i < elmc; i++) {
@@ -7551,7 +7666,7 @@ THREADED_TEST(PropertyEnumeration) {
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
"result;"))->Run();
- v8::Handle<v8::Array> elms = v8::Handle<v8::Array>::Cast(obj);
+ v8::Handle<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
@@ -8073,7 +8188,7 @@ TEST(ObjectClone) {
// Create an object, verify basics.
Local<Value> val = CompileRun(sample);
CHECK(val->IsObject());
- Local<v8::Object> obj = Local<v8::Object>::Cast(val);
+ Local<v8::Object> obj = val.As<v8::Object>();
obj->Set(v8_str("gamma"), v8_str("cloneme"));
CHECK_EQ(v8_str("hello"), obj->Get(v8_str("alpha")));
@@ -9714,7 +9829,7 @@ static void SetterWhichSetsYOnThisTo23(Local<String> name,
}
-THREADED_TEST(SetterOnConstructorPrototype) {
+TEST(SetterOnConstructorPrototype) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("x"),
@@ -9796,3 +9911,47 @@ THREADED_TEST(InterceptorOnConstructorPrototype) {
CHECK_EQ(42, c2->Get(v8_str("y"))->Int32Value());
}
}
+
+
+TEST(Bug618) {
+ const char* source = "function C1() {"
+ " this.x = 23;"
+ "};"
+ "C1.prototype = P;";
+
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Local<v8::Script> script;
+
+ // Use a simple object as prototype.
+ v8::Local<v8::Object> prototype = v8::Object::New();
+ prototype->Set(v8_str("y"), v8_num(42));
+ context->Global()->Set(v8_str("P"), prototype);
+
+ // This compile will add the code to the compilation cache.
+ CompileRun(source);
+
+ script = v8::Script::Compile(v8_str("new C1();"));
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
+ CHECK_EQ(23, c1->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(42, c1->Get(v8_str("y"))->Int32Value());
+ }
+
+ // Use an API object with accessors as prototype.
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"),
+ GetterWhichReturns42,
+ SetterWhichSetsYOnThisTo23);
+ context->Global()->Set(v8_str("P"), templ->NewInstance());
+
+ // This compile will get the code from the compilation cache.
+ CompileRun(source);
+
+ script = v8::Script::Compile(v8_str("new C1();"));
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
+ CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
+ }
+}
diff --git a/deps/v8/test/cctest/test-dataflow.cc b/deps/v8/test/cctest/test-dataflow.cc
new file mode 100644
index 0000000000..003ac6680b
--- /dev/null
+++ b/deps/v8/test/cctest/test-dataflow.cc
@@ -0,0 +1,103 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "data-flow.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+TEST(BitVector) {
+ ZoneScope zone(DELETE_ON_EXIT);
+ {
+ BitVector v(15);
+ v.Add(1);
+ CHECK(v.Contains(1));
+ v.Remove(0);
+ CHECK(!v.Contains(0));
+ v.Add(0);
+ v.Add(1);
+ BitVector w(15);
+ w.Add(1);
+ v.Intersect(w);
+ CHECK(!v.Contains(0));
+ CHECK(v.Contains(1));
+ }
+
+ {
+ BitVector v(15);
+ v.Add(0);
+ BitVector w(15);
+ w.Add(1);
+ v.Union(w);
+ CHECK(v.Contains(0));
+ CHECK(v.Contains(1));
+ }
+
+ {
+ BitVector v(15);
+ v.Add(0);
+ BitVector w(15);
+ w = v;
+ CHECK(w.Contains(0));
+ w.Add(1);
+ BitVector u(w);
+ CHECK(u.Contains(0));
+ CHECK(u.Contains(1));
+ v.Union(w);
+ CHECK(v.Contains(0));
+ CHECK(v.Contains(1));
+ }
+
+ {
+ BitVector v(35);
+ v.Add(0);
+ BitVector w(35);
+ w.Add(33);
+ v.Union(w);
+ CHECK(v.Contains(0));
+ CHECK(v.Contains(33));
+ }
+
+ {
+ BitVector v(35);
+ v.Add(32);
+ v.Add(33);
+ BitVector w(35);
+ w.Add(33);
+ v.Intersect(w);
+ CHECK(!v.Contains(32));
+ CHECK(v.Contains(33));
+ BitVector r(35);
+ r.CopyFrom(v);
+ CHECK(!r.Contains(32));
+ CHECK(r.Contains(33));
+ }
+}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index b7c39b226d..d0726b9bdc 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -5958,7 +5958,7 @@ TEST(ProcessDebugMessages) {
}
-struct BracktraceData {
+struct BacktraceData {
static int frame_counter;
static void MessageHandler(const v8::Debug::Message& message) {
char print_buffer[1000];
@@ -5972,7 +5972,7 @@ struct BracktraceData {
}
};
-int BracktraceData::frame_counter;
+int BacktraceData::frame_counter;
// Test that debug messages get processed when ProcessDebugMessages is called.
@@ -5980,7 +5980,7 @@ TEST(Backtrace) {
v8::HandleScope scope;
DebugLocalContext env;
- v8::Debug::SetMessageHandler2(BracktraceData::MessageHandler);
+ v8::Debug::SetMessageHandler2(BacktraceData::MessageHandler);
const int kBufferSize = 1000;
uint16_t buffer[kBufferSize];
@@ -5990,19 +5990,19 @@ TEST(Backtrace) {
"\"command\":\"backtrace\"}";
// Check backtrace from ProcessDebugMessages.
- BracktraceData::frame_counter = -10;
+ BacktraceData::frame_counter = -10;
v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
v8::Debug::ProcessDebugMessages();
- CHECK_EQ(BracktraceData::frame_counter, 0);
+ CHECK_EQ(BacktraceData::frame_counter, 0);
v8::Handle<v8::String> void0 = v8::String::New("void(0)");
v8::Handle<v8::Script> script = v8::Script::Compile(void0, void0);
// Check backtrace from "void(0)" script.
- BracktraceData::frame_counter = -10;
+ BacktraceData::frame_counter = -10;
v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
script->Run();
- CHECK_EQ(BracktraceData::frame_counter, 1);
+ CHECK_EQ(BacktraceData::frame_counter, 1);
// Get rid of the debug message handler.
v8::Debug::SetMessageHandler2(NULL);
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 7b0ad99e80..5a64f0fdfd 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -234,7 +234,9 @@ TEST(DisasmIa320) {
__ imul(edx, ecx, 12);
__ imul(edx, ecx, 1000);
+ __ cld();
__ rep_movs();
+ __ rep_stos();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
__ sub(edx, Operand(ebx));
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 295b0ee060..2e568946a7 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -8,6 +8,7 @@
#include "heap-profiler.h"
#include "string-stream.h"
#include "cctest.h"
+#include "zone-inl.h"
namespace i = v8::internal;
using i::ClustersCoarser;
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index d36286bb62..45c516038d 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -149,14 +149,9 @@ TEST(HeapObjects) {
CHECK(Heap::nan_value()->IsNumber());
CHECK(isnan(Heap::nan_value()->Number()));
- Object* str = Heap::AllocateStringFromAscii(CStrVector("fisk hest "));
- if (!str->IsFailure()) {
- String* s = String::cast(str);
- CHECK(s->IsString());
- CHECK_EQ(10, s->length());
- } else {
- CHECK(false);
- }
+ Handle<String> s = Factory::NewStringFromAscii(CStrVector("fisk hest "));
+ CHECK(s->IsString());
+ CHECK_EQ(10, s->length());
String* object_symbol = String::cast(Heap::Object_symbol());
CHECK(Top::context()->global()->HasLocalProperty(object_symbol));
@@ -201,69 +196,68 @@ TEST(GarbageCollection) {
InitializeVM();
v8::HandleScope sc;
- // check GC when heap is empty
+ // Check GC.
int free_bytes = Heap::MaxObjectSizeInPagedSpace();
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
- // allocate a function and keep it in global object's property
- String* func_name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
- SharedFunctionInfo* function_share =
- SharedFunctionInfo::cast(Heap::AllocateSharedFunctionInfo(func_name));
- JSFunction* function =
- JSFunction::cast(Heap::AllocateFunction(*Top::function_map(),
- function_share,
- Heap::undefined_value()));
- Map* initial_map =
- Map::cast(Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize));
- function->set_initial_map(initial_map);
- Top::context()->global()->SetProperty(func_name, function, NONE);
-
- // allocate an object, but it is unrooted
- String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
- String* prop_namex = String::cast(Heap::LookupAsciiSymbol("theSlotx"));
- JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
- obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
- obj->SetProperty(prop_namex, Smi::FromInt(24), NONE);
-
- CHECK_EQ(Smi::FromInt(23), obj->GetProperty(prop_name));
- CHECK_EQ(Smi::FromInt(24), obj->GetProperty(prop_namex));
+ Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
+ Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
+ Handle<String> prop_namex = Factory::LookupAsciiSymbol("theSlotx");
+ Handle<String> obj_name = Factory::LookupAsciiSymbol("theObject");
+
+ {
+ v8::HandleScope inner_scope;
+ // Allocate a function and keep it in global object's property.
+ Handle<JSFunction> function =
+ Factory::NewFunction(name, Factory::undefined_value());
+ Handle<Map> initial_map =
+ Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ function->set_initial_map(*initial_map);
+ Top::context()->global()->SetProperty(*name, *function, NONE);
+ // Allocate an object. Unrooted after leaving the scope.
+ Handle<JSObject> obj = Factory::NewJSObject(function);
+ obj->SetProperty(*prop_name, Smi::FromInt(23), NONE);
+ obj->SetProperty(*prop_namex, Smi::FromInt(24), NONE);
+
+ CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
+ CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
+ }
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
- // function should be alive, func_name might be invalid after GC
- func_name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
- CHECK(Top::context()->global()->HasLocalProperty(func_name));
- // check function is retained
- Object* func_value = Top::context()->global()->GetProperty(func_name);
+ // Function should be alive.
+ CHECK(Top::context()->global()->HasLocalProperty(*name));
+ // Check function is retained.
+ Object* func_value = Top::context()->global()->GetProperty(*name);
CHECK(func_value->IsJSFunction());
- // old function pointer may not be valid
- function = JSFunction::cast(func_value);
-
- // allocate another object, make it reachable from global
- obj = JSObject::cast(Heap::AllocateJSObject(function));
- String* obj_name = String::cast(Heap::LookupAsciiSymbol("theObject"));
- Top::context()->global()->SetProperty(obj_name, obj, NONE);
- // set property
- prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
- obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
+ Handle<JSFunction> function(JSFunction::cast(func_value));
+
+ {
+ HandleScope inner_scope;
+ // Allocate another object, make it reachable from global.
+ Handle<JSObject> obj = Factory::NewJSObject(function);
+ Top::context()->global()->SetProperty(*obj_name, *obj, NONE);
+ obj->SetProperty(*prop_name, Smi::FromInt(23), NONE);
+ }
- // after gc, it should survive
+ // After gc, it should survive.
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
- obj_name = String::cast(Heap::LookupAsciiSymbol("theObject"));
- CHECK(Top::context()->global()->HasLocalProperty(obj_name));
- CHECK(Top::context()->global()->GetProperty(obj_name)->IsJSObject());
- obj = JSObject::cast(Top::context()->global()->GetProperty(obj_name));
- prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
- CHECK_EQ(Smi::FromInt(23), obj->GetProperty(prop_name));
+ CHECK(Top::context()->global()->HasLocalProperty(*obj_name));
+ CHECK(Top::context()->global()->GetProperty(*obj_name)->IsJSObject());
+ JSObject* obj =
+ JSObject::cast(Top::context()->global()->GetProperty(*obj_name));
+ CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
}
static void VerifyStringAllocation(const char* string) {
- String* s = String::cast(Heap::AllocateStringFromUtf8(CStrVector(string)));
+ v8::HandleScope scope;
+ Handle<String> s = Factory::NewStringFromUtf8(CStrVector(string));
CHECK_EQ(StrLength(string), s->length());
for (int index = 0; index < s->length(); index++) {
- CHECK_EQ(static_cast<uint16_t>(string[index]), s->Get(index)); }
+ CHECK_EQ(static_cast<uint16_t>(string[index]), s->Get(index));
+ }
}
@@ -291,13 +285,22 @@ TEST(LocalHandles) {
TEST(GlobalHandles) {
InitializeVM();
- Object* i = Heap::AllocateStringFromAscii(CStrVector("fisk"));
- Object* u = Heap::AllocateHeapNumber(1.12344);
+ Handle<Object> h1;
+ Handle<Object> h2;
+ Handle<Object> h3;
+ Handle<Object> h4;
+
+ {
+ HandleScope scope;
- Handle<Object> h1 = GlobalHandles::Create(i);
- Handle<Object> h2 = GlobalHandles::Create(u);
- Handle<Object> h3 = GlobalHandles::Create(i);
- Handle<Object> h4 = GlobalHandles::Create(u);
+ Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = Factory::NewNumber(1.12344);
+
+ h1 = GlobalHandles::Create(*i);
+ h2 = GlobalHandles::Create(*u);
+ h3 = GlobalHandles::Create(*i);
+ h4 = GlobalHandles::Create(*u);
+ }
// after gc, it should survive
CHECK(Heap::CollectGarbage(0, NEW_SPACE));
@@ -331,11 +334,18 @@ TEST(WeakGlobalHandlesScavenge) {
WeakPointerCleared = false;
- Object* i = Heap::AllocateStringFromAscii(CStrVector("fisk"));
- Object* u = Heap::AllocateHeapNumber(1.12344);
+ Handle<Object> h1;
+ Handle<Object> h2;
+
+ {
+ HandleScope scope;
- Handle<Object> h1 = GlobalHandles::Create(i);
- Handle<Object> h2 = GlobalHandles::Create(u);
+ Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = Factory::NewNumber(1.12344);
+
+ h1 = GlobalHandles::Create(*i);
+ h2 = GlobalHandles::Create(*u);
+ }
GlobalHandles::MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
@@ -361,11 +371,18 @@ TEST(WeakGlobalHandlesMark) {
WeakPointerCleared = false;
- Object* i = Heap::AllocateStringFromAscii(CStrVector("fisk"));
- Object* u = Heap::AllocateHeapNumber(1.12344);
+ Handle<Object> h1;
+ Handle<Object> h2;
+
+ {
+ HandleScope scope;
- Handle<Object> h1 = GlobalHandles::Create(i);
- Handle<Object> h2 = GlobalHandles::Create(u);
+ Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = Factory::NewNumber(1.12344);
+
+ h1 = GlobalHandles::Create(*i);
+ h2 = GlobalHandles::Create(*u);
+ }
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK(Heap::CollectGarbage(0, NEW_SPACE));
@@ -401,8 +418,14 @@ TEST(DeleteWeakGlobalHandle) {
WeakPointerCleared = false;
- Object* i = Heap::AllocateStringFromAscii(CStrVector("fisk"));
- Handle<Object> h = GlobalHandles::Create(i);
+ Handle<Object> h;
+
+ {
+ HandleScope scope;
+
+ Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
+ h = GlobalHandles::Create(*i);
+ }
GlobalHandles::MakeWeak(h.location(),
reinterpret_cast<void*>(1234),
@@ -509,24 +532,20 @@ TEST(FunctionAllocation) {
InitializeVM();
v8::HandleScope sc;
- String* name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
- SharedFunctionInfo* function_share =
- SharedFunctionInfo::cast(Heap::AllocateSharedFunctionInfo(name));
- JSFunction* function =
- JSFunction::cast(Heap::AllocateFunction(*Top::function_map(),
- function_share,
- Heap::undefined_value()));
- Map* initial_map =
- Map::cast(Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize));
- function->set_initial_map(initial_map);
-
- String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
- JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
- obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
- CHECK_EQ(Smi::FromInt(23), obj->GetProperty(prop_name));
+ Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
+ Handle<JSFunction> function =
+ Factory::NewFunction(name, Factory::undefined_value());
+ Handle<Map> initial_map =
+ Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ function->set_initial_map(*initial_map);
+
+ Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
+ Handle<JSObject> obj = Factory::NewJSObject(function);
+ obj->SetProperty(*prop_name, Smi::FromInt(23), NONE);
+ CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check that we can add properties to function objects.
- function->SetProperty(prop_name, Smi::FromInt(24), NONE);
- CHECK_EQ(Smi::FromInt(24), function->GetProperty(prop_name));
+ function->SetProperty(*prop_name, Smi::FromInt(24), NONE);
+ CHECK_EQ(Smi::FromInt(24), function->GetProperty(*prop_name));
}
@@ -534,64 +553,64 @@ TEST(ObjectProperties) {
InitializeVM();
v8::HandleScope sc;
- JSFunction* constructor =
- JSFunction::cast(
- Top::context()->global()->GetProperty(String::cast(
- Heap::Object_symbol())));
- JSObject* obj = JSObject::cast(Heap::AllocateJSObject(constructor));
- String* first = String::cast(Heap::LookupAsciiSymbol("first"));
- String* second = String::cast(Heap::LookupAsciiSymbol("second"));
+ String* object_symbol = String::cast(Heap::Object_symbol());
+ JSFunction* object_function =
+ JSFunction::cast(Top::context()->global()->GetProperty(object_symbol));
+ Handle<JSFunction> constructor(object_function);
+ Handle<JSObject> obj = Factory::NewJSObject(constructor);
+ Handle<String> first = Factory::LookupAsciiSymbol("first");
+ Handle<String> second = Factory::LookupAsciiSymbol("second");
// check for empty
- CHECK(!obj->HasLocalProperty(first));
+ CHECK(!obj->HasLocalProperty(*first));
// add first
- obj->SetProperty(first, Smi::FromInt(1), NONE);
- CHECK(obj->HasLocalProperty(first));
+ obj->SetProperty(*first, Smi::FromInt(1), NONE);
+ CHECK(obj->HasLocalProperty(*first));
// delete first
- CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
- CHECK(!obj->HasLocalProperty(first));
+ CHECK(obj->DeleteProperty(*first, JSObject::NORMAL_DELETION));
+ CHECK(!obj->HasLocalProperty(*first));
// add first and then second
- obj->SetProperty(first, Smi::FromInt(1), NONE);
- obj->SetProperty(second, Smi::FromInt(2), NONE);
- CHECK(obj->HasLocalProperty(first));
- CHECK(obj->HasLocalProperty(second));
+ obj->SetProperty(*first, Smi::FromInt(1), NONE);
+ obj->SetProperty(*second, Smi::FromInt(2), NONE);
+ CHECK(obj->HasLocalProperty(*first));
+ CHECK(obj->HasLocalProperty(*second));
// delete first and then second
- CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
- CHECK(obj->HasLocalProperty(second));
- CHECK(obj->DeleteProperty(second, JSObject::NORMAL_DELETION));
- CHECK(!obj->HasLocalProperty(first));
- CHECK(!obj->HasLocalProperty(second));
+ CHECK(obj->DeleteProperty(*first, JSObject::NORMAL_DELETION));
+ CHECK(obj->HasLocalProperty(*second));
+ CHECK(obj->DeleteProperty(*second, JSObject::NORMAL_DELETION));
+ CHECK(!obj->HasLocalProperty(*first));
+ CHECK(!obj->HasLocalProperty(*second));
// add first and then second
- obj->SetProperty(first, Smi::FromInt(1), NONE);
- obj->SetProperty(second, Smi::FromInt(2), NONE);
- CHECK(obj->HasLocalProperty(first));
- CHECK(obj->HasLocalProperty(second));
+ obj->SetProperty(*first, Smi::FromInt(1), NONE);
+ obj->SetProperty(*second, Smi::FromInt(2), NONE);
+ CHECK(obj->HasLocalProperty(*first));
+ CHECK(obj->HasLocalProperty(*second));
// delete second and then first
- CHECK(obj->DeleteProperty(second, JSObject::NORMAL_DELETION));
- CHECK(obj->HasLocalProperty(first));
- CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
- CHECK(!obj->HasLocalProperty(first));
- CHECK(!obj->HasLocalProperty(second));
+ CHECK(obj->DeleteProperty(*second, JSObject::NORMAL_DELETION));
+ CHECK(obj->HasLocalProperty(*first));
+ CHECK(obj->DeleteProperty(*first, JSObject::NORMAL_DELETION));
+ CHECK(!obj->HasLocalProperty(*first));
+ CHECK(!obj->HasLocalProperty(*second));
// check string and symbol match
static const char* string1 = "fisk";
- String* s1 =
- String::cast(Heap::AllocateStringFromAscii(CStrVector(string1)));
- obj->SetProperty(s1, Smi::FromInt(1), NONE);
- CHECK(obj->HasLocalProperty(String::cast(Heap::LookupAsciiSymbol(string1))));
+ Handle<String> s1 = Factory::NewStringFromAscii(CStrVector(string1));
+ obj->SetProperty(*s1, Smi::FromInt(1), NONE);
+ Handle<String> s1_symbol = Factory::LookupAsciiSymbol(string1);
+ CHECK(obj->HasLocalProperty(*s1_symbol));
// check symbol and string match
static const char* string2 = "fugl";
- String* s2 = String::cast(Heap::LookupAsciiSymbol(string2));
- obj->SetProperty(s2, Smi::FromInt(1), NONE);
- CHECK(obj->HasLocalProperty(
- String::cast(Heap::AllocateStringFromAscii(CStrVector(string2)))));
+ Handle<String> s2_symbol = Factory::LookupAsciiSymbol(string2);
+ obj->SetProperty(*s2_symbol, Smi::FromInt(1), NONE);
+ Handle<String> s2 = Factory::NewStringFromAscii(CStrVector(string2));
+ CHECK(obj->HasLocalProperty(*s2));
}
@@ -599,25 +618,22 @@ TEST(JSObjectMaps) {
InitializeVM();
v8::HandleScope sc;
- String* name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
- SharedFunctionInfo* function_share =
- SharedFunctionInfo::cast(Heap::AllocateSharedFunctionInfo(name));
- JSFunction* function =
- JSFunction::cast(Heap::AllocateFunction(*Top::function_map(),
- function_share,
- Heap::undefined_value()));
- Map* initial_map =
- Map::cast(Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize));
- function->set_initial_map(initial_map);
- String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
- JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
+ Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
+ Handle<JSFunction> function =
+ Factory::NewFunction(name, Factory::undefined_value());
+ Handle<Map> initial_map =
+ Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ function->set_initial_map(*initial_map);
+
+ Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
+ Handle<JSObject> obj = Factory::NewJSObject(function);
// Set a propery
- obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
- CHECK_EQ(Smi::FromInt(23), obj->GetProperty(prop_name));
+ obj->SetProperty(*prop_name, Smi::FromInt(23), NONE);
+ CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check the map has changed
- CHECK(initial_map != obj->map());
+ CHECK(*initial_map != obj->map());
}
@@ -625,12 +641,13 @@ TEST(JSArray) {
InitializeVM();
v8::HandleScope sc;
- String* name = String::cast(Heap::LookupAsciiSymbol("Array"));
- JSFunction* function =
- JSFunction::cast(Top::context()->global()->GetProperty(name));
+ Handle<String> name = Factory::LookupAsciiSymbol("Array");
+ Handle<JSFunction> function = Handle<JSFunction>(
+ JSFunction::cast(Top::context()->global()->GetProperty(*name)));
// Allocate the object.
- JSArray* array = JSArray::cast(Heap::AllocateJSObject(function));
+ Handle<JSObject> object = Factory::NewJSObject(function);
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
array->Initialize(0);
// Set array length to 0.
@@ -639,27 +656,27 @@ TEST(JSArray) {
CHECK(array->HasFastElements()); // Must be in fast mode.
// array[length] = name.
- array->SetElement(0, name);
+ array->SetElement(0, *name);
CHECK_EQ(Smi::FromInt(1), array->length());
- CHECK_EQ(array->GetElement(0), name);
+ CHECK_EQ(array->GetElement(0), *name);
-// Set array length with larger than smi value.
- Object* length =
- Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
- array->SetElementsLength(length);
+ // Set array length with larger than smi value.
+ Handle<Object> length =
+ Factory::NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
+ array->SetElementsLength(*length);
uint32_t int_length = 0;
- CHECK(Array::IndexFromObject(length, &int_length));
- CHECK_EQ(length, array->length());
+ CHECK(Array::IndexFromObject(*length, &int_length));
+ CHECK_EQ(*length, array->length());
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- array->SetElement(int_length, name);
+ array->SetElement(int_length, *name);
uint32_t new_int_length = 0;
CHECK(Array::IndexFromObject(array->length(), &new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
- CHECK_EQ(array->GetElement(int_length), name);
- CHECK_EQ(array->GetElement(0), name);
+ CHECK_EQ(array->GetElement(int_length), *name);
+ CHECK_EQ(array->GetElement(0), *name);
}
@@ -667,41 +684,42 @@ TEST(JSObjectCopy) {
InitializeVM();
v8::HandleScope sc;
- String* name = String::cast(Heap::Object_symbol());
- JSFunction* constructor =
- JSFunction::cast(Top::context()->global()->GetProperty(name));
- JSObject* obj = JSObject::cast(Heap::AllocateJSObject(constructor));
- String* first = String::cast(Heap::LookupAsciiSymbol("first"));
- String* second = String::cast(Heap::LookupAsciiSymbol("second"));
+ String* object_symbol = String::cast(Heap::Object_symbol());
+ JSFunction* object_function =
+ JSFunction::cast(Top::context()->global()->GetProperty(object_symbol));
+ Handle<JSFunction> constructor(object_function);
+ Handle<JSObject> obj = Factory::NewJSObject(constructor);
+ Handle<String> first = Factory::LookupAsciiSymbol("first");
+ Handle<String> second = Factory::LookupAsciiSymbol("second");
- obj->SetProperty(first, Smi::FromInt(1), NONE);
- obj->SetProperty(second, Smi::FromInt(2), NONE);
+ obj->SetProperty(*first, Smi::FromInt(1), NONE);
+ obj->SetProperty(*second, Smi::FromInt(2), NONE);
- obj->SetElement(0, first);
- obj->SetElement(1, second);
+ obj->SetElement(0, *first);
+ obj->SetElement(1, *second);
// Make the clone.
- JSObject* clone = JSObject::cast(Heap::CopyJSObject(obj));
- CHECK(clone != obj);
+ Handle<JSObject> clone = Copy(obj);
+ CHECK(!clone.is_identical_to(obj));
CHECK_EQ(obj->GetElement(0), clone->GetElement(0));
CHECK_EQ(obj->GetElement(1), clone->GetElement(1));
- CHECK_EQ(obj->GetProperty(first), clone->GetProperty(first));
- CHECK_EQ(obj->GetProperty(second), clone->GetProperty(second));
+ CHECK_EQ(obj->GetProperty(*first), clone->GetProperty(*first));
+ CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*second));
// Flip the values.
- clone->SetProperty(first, Smi::FromInt(2), NONE);
- clone->SetProperty(second, Smi::FromInt(1), NONE);
+ clone->SetProperty(*first, Smi::FromInt(2), NONE);
+ clone->SetProperty(*second, Smi::FromInt(1), NONE);
- clone->SetElement(0, second);
- clone->SetElement(1, first);
+ clone->SetElement(0, *second);
+ clone->SetElement(1, *first);
CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
- CHECK_EQ(obj->GetProperty(second), clone->GetProperty(first));
- CHECK_EQ(obj->GetProperty(first), clone->GetProperty(second));
+ CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*first));
+ CHECK_EQ(obj->GetProperty(*first), clone->GetProperty(*second));
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 9853af3244..4c5101c279 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -174,12 +174,11 @@ namespace {
class ScopedLoggerInitializer {
public:
- explicit ScopedLoggerInitializer(bool log, bool prof_lazy)
- : saved_log_(i::FLAG_log),
- saved_prof_lazy_(i::FLAG_prof_lazy),
+ explicit ScopedLoggerInitializer(bool prof_lazy)
+ : saved_prof_lazy_(i::FLAG_prof_lazy),
saved_prof_(i::FLAG_prof),
saved_prof_auto_(i::FLAG_prof_auto),
- trick_to_run_init_flags_(init_flags_(log, prof_lazy)),
+ trick_to_run_init_flags_(init_flags_(prof_lazy)),
need_to_set_up_logger_(i::V8::IsRunning()),
scope_(),
env_(v8::Context::New()) {
@@ -193,14 +192,12 @@ class ScopedLoggerInitializer {
i::FLAG_prof_lazy = saved_prof_lazy_;
i::FLAG_prof = saved_prof_;
i::FLAG_prof_auto = saved_prof_auto_;
- i::FLAG_log = saved_log_;
}
v8::Handle<v8::Context>& env() { return env_; }
private:
- static bool init_flags_(bool log, bool prof_lazy) {
- i::FLAG_log = log;
+ static bool init_flags_(bool prof_lazy) {
i::FLAG_prof = true;
i::FLAG_prof_lazy = prof_lazy;
i::FLAG_prof_auto = false;
@@ -208,7 +205,6 @@ class ScopedLoggerInitializer {
return prof_lazy;
}
- const bool saved_log_;
const bool saved_prof_lazy_;
const bool saved_prof_;
const bool saved_prof_auto_;
@@ -320,7 +316,7 @@ static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
TEST(ProfLazyMode) {
- ScopedLoggerInitializer initialize_logger(false, true);
+ ScopedLoggerInitializer initialize_logger(true);
// No sampling should happen prior to resuming profiler.
CHECK(!LoggerTestHelper::IsSamplerActive());
@@ -394,19 +390,19 @@ class LoopingThread : public v8::internal::Thread {
class LoopingJsThread : public LoopingThread {
public:
void RunLoop() {
- {
- v8::Locker locker;
- CHECK(v8::internal::ThreadManager::HasId());
- SetV8ThreadId();
- }
+ v8::Locker locker;
+ CHECK(v8::internal::ThreadManager::HasId());
+ SetV8ThreadId();
while (IsRunning()) {
- v8::Locker locker;
v8::HandleScope scope;
v8::Persistent<v8::Context> context = v8::Context::New();
- v8::Context::Scope context_scope(context);
- SignalRunning();
- CompileAndRunScript(
- "var j; for (var i=0; i<10000; ++i) { j = Math.sin(i); }");
+ CHECK(!context.IsEmpty());
+ {
+ v8::Context::Scope context_scope(context);
+ SignalRunning();
+ CompileAndRunScript(
+ "var j; for (var i=0; i<10000; ++i) { j = Math.sin(i); }");
+ }
context.Dispose();
i::OS::Sleep(1);
}
@@ -540,7 +536,7 @@ static v8::Handle<v8::Value> ObjMethod1(const v8::Arguments& args) {
}
TEST(LogCallbacks) {
- ScopedLoggerInitializer initialize_logger(false, false);
+ ScopedLoggerInitializer initialize_logger(false);
LogBufferMatcher matcher;
v8::Persistent<v8::FunctionTemplate> obj =
@@ -590,7 +586,7 @@ static v8::Handle<v8::Value> Prop2Getter(v8::Local<v8::String> property,
}
TEST(LogAccessorCallbacks) {
- ScopedLoggerInitializer initialize_logger(false, false);
+ ScopedLoggerInitializer initialize_logger(false);
LogBufferMatcher matcher;
v8::Persistent<v8::FunctionTemplate> obj =
@@ -625,7 +621,7 @@ TEST(LogAccessorCallbacks) {
TEST(LogTags) {
- ScopedLoggerInitializer initialize_logger(true, false);
+ ScopedLoggerInitializer initialize_logger(false);
LogBufferMatcher matcher;
const char* open_tag = "open-tag,";
@@ -710,6 +706,35 @@ TEST(LogTags) {
}
+TEST(IsLoggingPreserved) {
+ ScopedLoggerInitializer initialize_logger(false);
+
+ CHECK(Logger::is_logging());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK(Logger::is_logging());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK(Logger::is_logging());
+
+ CHECK(Logger::is_logging());
+ Logger::ResumeProfiler(
+ v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
+ CHECK(Logger::is_logging());
+ Logger::PauseProfiler(
+ v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
+ CHECK(Logger::is_logging());
+
+ CHECK(Logger::is_logging());
+ Logger::ResumeProfiler(
+ v8::PROFILER_MODULE_CPU |
+ v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
+ CHECK(Logger::is_logging());
+ Logger::PauseProfiler(
+ v8::PROFILER_MODULE_CPU |
+ v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
+ CHECK(Logger::is_logging());
+}
+
+
static inline bool IsStringEqualTo(const char* r, const char* s) {
return strncmp(r, s, strlen(r)) == 0;
}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index c34840ace7..1283dfdac8 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -131,7 +131,7 @@ TEST(ExternalReferenceEncoder) {
ExternalReference::address_of_real_stack_limit();
CHECK_EQ(make_code(UNCLASSIFIED, 5),
encoder.Encode(real_stack_limit_address.address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 11),
+ CHECK_EQ(make_code(UNCLASSIFIED, 12),
encoder.Encode(ExternalReference::debug_break().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 7),
encoder.Encode(ExternalReference::new_space_start().address()));
@@ -165,7 +165,7 @@ TEST(ExternalReferenceDecoder) {
CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(),
decoder.Decode(make_code(UNCLASSIFIED, 5)));
CHECK_EQ(ExternalReference::debug_break().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 11)));
+ decoder.Decode(make_code(UNCLASSIFIED, 12)));
CHECK_EQ(ExternalReference::new_space_start().address(),
decoder.Decode(make_code(UNCLASSIFIED, 7)));
}
@@ -302,6 +302,10 @@ DEPENDENT_TEST(Deserialize, Serialize) {
DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
+ // BUG(632): Disable this test until the partial_snapshots branch is
+ // merged back.
+ return;
+
v8::HandleScope scope;
Deserialize();
@@ -330,6 +334,10 @@ DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
SerializeTwice) {
+ // BUG(632): Disable this test until the partial_snapshots branch is
+ // merged back.
+ return;
+
v8::HandleScope scope;
Deserialize();
@@ -481,8 +489,8 @@ TEST(LinearAllocation) {
i += kSmallFixedArraySize) {
Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength);
if (new_last != NULL) {
- CHECK_EQ(reinterpret_cast<char*>(obj),
- reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
+ CHECK(reinterpret_cast<char*>(obj) ==
+ reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
}
new_last = obj;
}
@@ -500,8 +508,8 @@ TEST(LinearAllocation) {
pointer_last = NULL;
}
if (pointer_last != NULL) {
- CHECK_EQ(reinterpret_cast<char*>(obj),
- reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
+ CHECK(reinterpret_cast<char*>(obj) ==
+ reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
}
pointer_last = obj;
}
@@ -517,8 +525,8 @@ TEST(LinearAllocation) {
data_last = NULL;
}
if (data_last != NULL) {
- CHECK_EQ(reinterpret_cast<char*>(obj),
- reinterpret_cast<char*>(data_last) + kSmallStringSize);
+ CHECK(reinterpret_cast<char*>(obj) ==
+ reinterpret_cast<char*>(data_last) + kSmallStringSize);
}
data_last = obj;
}
@@ -534,8 +542,8 @@ TEST(LinearAllocation) {
map_last = NULL;
}
if (map_last != NULL) {
- CHECK_EQ(reinterpret_cast<char*>(obj),
- reinterpret_cast<char*>(map_last) + kMapSize);
+ CHECK(reinterpret_cast<char*>(obj) ==
+ reinterpret_cast<char*>(map_last) + kMapSize);
}
map_last = obj;
}
diff --git a/deps/v8/test/mjsunit/array-elements-from-array-prototype-chain.js b/deps/v8/test/mjsunit/array-elements-from-array-prototype-chain.js
new file mode 100644
index 0000000000..edbeb2a64e
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-elements-from-array-prototype-chain.js
@@ -0,0 +1,191 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+////////////////////////////////////////////////////////////////////////
+// Tests below verify that elements set on Array.prototype's proto propagate
+// for various Array.prototype functions (like unshift, shift, etc.)
+// If add any new tests here, consider adding them to all other files:
+// array-elements-from-array-prototype.js
+// array-elements-from-array-prototype-chain.js
+// array-elements-from-object-prototype.js
+// those ideally should be identical modulo host of elements and
+// the way elements introduced.
+//
+// Note: they are put into a separate file as we need maximally clean
+// VM setup---some optimizations might be already turned off in
+// 'dirty' VM.
+////////////////////////////////////////////////////////////////////////
+
+var at3 = '@3'
+var at7 = '@7'
+
+Array.prototype.__proto__ = {3: at3};
+Array.prototype.__proto__.__proto__ = {7: at7};
+
+var a = new Array(13)
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+
+assertEquals(undefined, a.shift(), 'hole should be returned as undefined')
+// Side-effects: Array.prototype[3] percolates into a[2] and Array.prototype[7[
+// into a[6], still visible at the corresponding indices.
+
+assertEquals(at3, a[2])
+assertTrue(a.hasOwnProperty(2))
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+
+assertEquals(at7, a[6])
+assertTrue(a.hasOwnProperty(6))
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+
+a.unshift('foo', 'bar')
+// Side-effects: Array.prototype[3] now percolates into a[5] and Array.prototype[7]
+// into a[9].
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+assertEquals(at3, a[4])
+assertTrue(a.hasOwnProperty(4))
+assertEquals(at3, a[5])
+assertTrue(a.hasOwnProperty(5))
+
+assertEquals(undefined, a[6])
+assertFalse(a.hasOwnProperty(6))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+assertEquals(at7, a[8])
+assertTrue(a.hasOwnProperty(8))
+assertEquals(at7, a[9])
+assertTrue(a.hasOwnProperty(9))
+
+var sliced = a.slice(3, 10)
+// Slice must keep intact a and reify holes at indices 0--2 and 4--6.
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+assertEquals(at3, a[4])
+assertTrue(a.hasOwnProperty(4))
+assertEquals(at3, a[5])
+assertTrue(a.hasOwnProperty(5))
+
+assertEquals(undefined, a[6])
+assertFalse(a.hasOwnProperty(6))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+assertEquals(at7, a[8])
+assertTrue(a.hasOwnProperty(8))
+assertEquals(at7, a[9])
+assertTrue(a.hasOwnProperty(9))
+
+assertEquals(at3, sliced[0])
+assertTrue(sliced.hasOwnProperty(0))
+assertEquals(at3, sliced[1])
+assertTrue(sliced.hasOwnProperty(1))
+assertEquals(at3, sliced[2])
+assertTrue(sliced.hasOwnProperty(2))
+
+// Note: sliced[3] comes directly from Array.prototype[3]
+assertEquals(at3, sliced[3]);
+assertFalse(sliced.hasOwnProperty(3))
+
+assertEquals(at7, sliced[4])
+assertTrue(sliced.hasOwnProperty(4))
+assertEquals(at7, sliced[5])
+assertTrue(sliced.hasOwnProperty(5))
+assertEquals(at7, sliced[6])
+assertTrue(sliced.hasOwnProperty(6))
+
+
+// Splice is too complicated the operation, start afresh.
+
+// Shrking array.
+var a0 = [0, 1, , , 4, 5, , , , 9]
+var result = a0.splice(4, 1)
+// Side-effects: everything before 4 is kept intact:
+
+assertEquals(0, a0[0])
+assertTrue(a0.hasOwnProperty(0))
+assertEquals(1, a0[1])
+assertTrue(a0.hasOwnProperty(1))
+assertEquals(undefined, a0[2])
+assertFalse(a0.hasOwnProperty(2))
+assertEquals(at3, a0[3])
+assertFalse(a0.hasOwnProperty(3))
+
+// 4 and above shifted left by one reifying at7 into a0[6] and keeping
+// a hole at a0[7]
+
+assertEquals(5, a0[4])
+assertTrue(a0.hasOwnProperty(4))
+assertEquals(undefined, a0[5])
+assertFalse(a0.hasOwnProperty(5))
+assertEquals(at7, a0[6])
+assertTrue(a0.hasOwnProperty(6))
+assertEquals(at7, a0[7])
+assertFalse(a0.hasOwnProperty(7))
+assertEquals(9, a0[8])
+assertTrue(a0.hasOwnProperty(8))
+
+// Growing array.
+var a1 = [0, 1, , , 4, 5, , , , 9]
+var result = a1.splice(4, 0, undefined)
+// Side-effects: everything before 4 is kept intact:
+
+assertEquals(0, a1[0])
+assertTrue(a1.hasOwnProperty(0))
+assertEquals(1, a1[1])
+assertTrue(a1.hasOwnProperty(1))
+assertEquals(undefined, a1[2])
+assertFalse(a1.hasOwnProperty(2))
+assertEquals(at3, a1[3])
+assertFalse(a1.hasOwnProperty(3))
+
+// Now owned undefined resides at 4 and rest is shifted right by one
+// reifying at7 into a0[8] and keeping a hole at a0[7].
+
+assertEquals(undefined, a1[4])
+assertTrue(a1.hasOwnProperty(4))
+assertEquals(4, a1[5])
+assertTrue(a1.hasOwnProperty(5))
+assertEquals(5, a1[6])
+assertTrue(a1.hasOwnProperty(6))
+assertEquals(at7, a1[7])
+assertFalse(a1.hasOwnProperty(7))
+assertEquals(at7, a1[8])
+assertTrue(a1.hasOwnProperty(8))
+assertEquals(undefined, a1[9])
+assertFalse(a1.hasOwnProperty(9))
+assertEquals(9, a1[10])
+assertTrue(a1.hasOwnProperty(10))
diff --git a/deps/v8/test/mjsunit/array-elements-from-array-prototype.js b/deps/v8/test/mjsunit/array-elements-from-array-prototype.js
new file mode 100644
index 0000000000..b89cdfa1df
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-elements-from-array-prototype.js
@@ -0,0 +1,191 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+////////////////////////////////////////////////////////////////////////
+// Tests below verify that elements set on Array.prototype propagate
+// for various Array.prototype functions (like unshift, shift, etc.)
+// If add any new tests here, consider adding them to all other files:
+// array-elements-from-array-prototype.js
+// array-elements-from-array-prototype-chain.js
+// array-elements-from-object-prototype.js
+// those ideally should be identical modulo host of elements and
+// the way elements introduced.
+//
+// Note: they are put into a separate file as we need maximally clean
+// VM setup---some optimizations might be already turned off in
+// 'dirty' VM.
+////////////////////////////////////////////////////////////////////////
+
+var at3 = '@3'
+var at7 = '@7'
+
+Array.prototype[3] = at3
+Array.prototype[7] = at7
+
+var a = new Array(13)
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+
+assertEquals(undefined, a.shift(), 'hole should be returned as undefined')
+// Side-effects: Array.prototype[3] percolates into a[2] and Array.prototype[7[
+// into a[6], still visible at the corresponding indices.
+
+assertEquals(at3, a[2])
+assertTrue(a.hasOwnProperty(2))
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+
+assertEquals(at7, a[6])
+assertTrue(a.hasOwnProperty(6))
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+
+a.unshift('foo', 'bar')
+// Side-effects: Array.prototype[3] now percolates into a[5] and Array.prototype[7]
+// into a[9].
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+assertEquals(at3, a[4])
+assertTrue(a.hasOwnProperty(4))
+assertEquals(at3, a[5])
+assertTrue(a.hasOwnProperty(5))
+
+assertEquals(undefined, a[6])
+assertFalse(a.hasOwnProperty(6))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+assertEquals(at7, a[8])
+assertTrue(a.hasOwnProperty(8))
+assertEquals(at7, a[9])
+assertTrue(a.hasOwnProperty(9))
+
+var sliced = a.slice(3, 10)
+// Slice must keep intact a and reify holes at indices 0--2 and 4--6.
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+assertEquals(at3, a[4])
+assertTrue(a.hasOwnProperty(4))
+assertEquals(at3, a[5])
+assertTrue(a.hasOwnProperty(5))
+
+assertEquals(undefined, a[6])
+assertFalse(a.hasOwnProperty(6))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+assertEquals(at7, a[8])
+assertTrue(a.hasOwnProperty(8))
+assertEquals(at7, a[9])
+assertTrue(a.hasOwnProperty(9))
+
+assertEquals(at3, sliced[0])
+assertTrue(sliced.hasOwnProperty(0))
+assertEquals(at3, sliced[1])
+assertTrue(sliced.hasOwnProperty(1))
+assertEquals(at3, sliced[2])
+assertTrue(sliced.hasOwnProperty(2))
+
+// Note: sliced[3] comes directly from Array.prototype[3]
+assertEquals(at3, sliced[3]);
+assertFalse(sliced.hasOwnProperty(3))
+
+assertEquals(at7, sliced[4])
+assertTrue(sliced.hasOwnProperty(4))
+assertEquals(at7, sliced[5])
+assertTrue(sliced.hasOwnProperty(5))
+assertEquals(at7, sliced[6])
+assertTrue(sliced.hasOwnProperty(6))
+
+
+// Splice is too complicated the operation, start afresh.
+
+// Shrking array.
+var a0 = [0, 1, , , 4, 5, , , , 9]
+var result = a0.splice(4, 1)
+// Side-effects: everything before 4 is kept intact:
+
+assertEquals(0, a0[0])
+assertTrue(a0.hasOwnProperty(0))
+assertEquals(1, a0[1])
+assertTrue(a0.hasOwnProperty(1))
+assertEquals(undefined, a0[2])
+assertFalse(a0.hasOwnProperty(2))
+assertEquals(at3, a0[3])
+assertFalse(a0.hasOwnProperty(3))
+
+// 4 and above shifted left by one reifying at7 into a0[6] and keeping
+// a hole at a0[7]
+
+assertEquals(5, a0[4])
+assertTrue(a0.hasOwnProperty(4))
+assertEquals(undefined, a0[5])
+assertFalse(a0.hasOwnProperty(5))
+assertEquals(at7, a0[6])
+assertTrue(a0.hasOwnProperty(6))
+assertEquals(at7, a0[7])
+assertFalse(a0.hasOwnProperty(7))
+assertEquals(9, a0[8])
+assertTrue(a0.hasOwnProperty(8))
+
+// Growing array.
+var a1 = [0, 1, , , 4, 5, , , , 9]
+var result = a1.splice(4, 0, undefined)
+// Side-effects: everything before 4 is kept intact:
+
+assertEquals(0, a1[0])
+assertTrue(a1.hasOwnProperty(0))
+assertEquals(1, a1[1])
+assertTrue(a1.hasOwnProperty(1))
+assertEquals(undefined, a1[2])
+assertFalse(a1.hasOwnProperty(2))
+assertEquals(at3, a1[3])
+assertFalse(a1.hasOwnProperty(3))
+
+// Now owned undefined resides at 4 and rest is shifted right by one
+// reifying at7 into a0[8] and keeping a hole at a0[7].
+
+assertEquals(undefined, a1[4])
+assertTrue(a1.hasOwnProperty(4))
+assertEquals(4, a1[5])
+assertTrue(a1.hasOwnProperty(5))
+assertEquals(5, a1[6])
+assertTrue(a1.hasOwnProperty(6))
+assertEquals(at7, a1[7])
+assertFalse(a1.hasOwnProperty(7))
+assertEquals(at7, a1[8])
+assertTrue(a1.hasOwnProperty(8))
+assertEquals(undefined, a1[9])
+assertFalse(a1.hasOwnProperty(9))
+assertEquals(9, a1[10])
+assertTrue(a1.hasOwnProperty(10))
diff --git a/deps/v8/test/mjsunit/array-elements-from-object-prototype.js b/deps/v8/test/mjsunit/array-elements-from-object-prototype.js
new file mode 100644
index 0000000000..a6ad0ee4ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-elements-from-object-prototype.js
@@ -0,0 +1,191 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+////////////////////////////////////////////////////////////////////////
+// Tests below verify that elements set on Object.prototype propagate
+// for various Array.prototype functions (like unshift, shift, etc.)
+// If add any new tests here, consider adding them to all other files:
+// array-elements-from-array-prototype.js
+// array-elements-from-array-prototype-chain.js
+// array-elements-from-object-prototype.js
+// those ideally should be identical modulo host of elements and
+// the way elements introduced.
+//
+// Note: they are put into a separate file as we need maximally clean
+// VM setup---some optimizations might be already turned off in
+// 'dirty' VM.
+////////////////////////////////////////////////////////////////////////
+
+var at3 = '@3'
+var at7 = '@7'
+
+Object.prototype[3] = at3
+Object.prototype[7] = at7
+
+var a = new Array(13)
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+
+assertEquals(undefined, a.shift(), 'hole should be returned as undefined')
+// Side-effects: Array.prototype[3] percolates into a[2] and Array.prototype[7[
+// into a[6], still visible at the corresponding indices.
+
+assertEquals(at3, a[2])
+assertTrue(a.hasOwnProperty(2))
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+
+assertEquals(at7, a[6])
+assertTrue(a.hasOwnProperty(6))
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+
+a.unshift('foo', 'bar')
+// Side-effects: Array.prototype[3] now percolates into a[5] and Array.prototype[7]
+// into a[9].
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+assertEquals(at3, a[4])
+assertTrue(a.hasOwnProperty(4))
+assertEquals(at3, a[5])
+assertTrue(a.hasOwnProperty(5))
+
+assertEquals(undefined, a[6])
+assertFalse(a.hasOwnProperty(6))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+assertEquals(at7, a[8])
+assertTrue(a.hasOwnProperty(8))
+assertEquals(at7, a[9])
+assertTrue(a.hasOwnProperty(9))
+
+var sliced = a.slice(3, 10)
+// Slice must keep intact a and reify holes at indices 0--2 and 4--6.
+
+assertEquals(at3, a[3])
+assertFalse(a.hasOwnProperty(3))
+assertEquals(at3, a[4])
+assertTrue(a.hasOwnProperty(4))
+assertEquals(at3, a[5])
+assertTrue(a.hasOwnProperty(5))
+
+assertEquals(undefined, a[6])
+assertFalse(a.hasOwnProperty(6))
+
+assertEquals(at7, a[7])
+assertFalse(a.hasOwnProperty(7))
+assertEquals(at7, a[8])
+assertTrue(a.hasOwnProperty(8))
+assertEquals(at7, a[9])
+assertTrue(a.hasOwnProperty(9))
+
+assertEquals(at3, sliced[0])
+assertTrue(sliced.hasOwnProperty(0))
+assertEquals(at3, sliced[1])
+assertTrue(sliced.hasOwnProperty(1))
+assertEquals(at3, sliced[2])
+assertTrue(sliced.hasOwnProperty(2))
+
+// Note: sliced[3] comes directly from Array.prototype[3]
+assertEquals(at3, sliced[3]);
+assertFalse(sliced.hasOwnProperty(3))
+
+assertEquals(at7, sliced[4])
+assertTrue(sliced.hasOwnProperty(4))
+assertEquals(at7, sliced[5])
+assertTrue(sliced.hasOwnProperty(5))
+assertEquals(at7, sliced[6])
+assertTrue(sliced.hasOwnProperty(6))
+
+
+// Splice is too complicated the operation, start afresh.
+
+// Shrking array.
+var a0 = [0, 1, , , 4, 5, , , , 9]
+var result = a0.splice(4, 1)
+// Side-effects: everything before 4 is kept intact:
+
+assertEquals(0, a0[0])
+assertTrue(a0.hasOwnProperty(0))
+assertEquals(1, a0[1])
+assertTrue(a0.hasOwnProperty(1))
+assertEquals(undefined, a0[2])
+assertFalse(a0.hasOwnProperty(2))
+assertEquals(at3, a0[3])
+assertFalse(a0.hasOwnProperty(3))
+
+// 4 and above shifted left by one reifying at7 into a0[6] and keeping
+// a hole at a0[7]
+
+assertEquals(5, a0[4])
+assertTrue(a0.hasOwnProperty(4))
+assertEquals(undefined, a0[5])
+assertFalse(a0.hasOwnProperty(5))
+assertEquals(at7, a0[6])
+assertTrue(a0.hasOwnProperty(6))
+assertEquals(at7, a0[7])
+assertFalse(a0.hasOwnProperty(7))
+assertEquals(9, a0[8])
+assertTrue(a0.hasOwnProperty(8))
+
+// Growing array.
+var a1 = [0, 1, , , 4, 5, , , , 9]
+var result = a1.splice(4, 0, undefined)
+// Side-effects: everything before 4 is kept intact:
+
+assertEquals(0, a1[0])
+assertTrue(a1.hasOwnProperty(0))
+assertEquals(1, a1[1])
+assertTrue(a1.hasOwnProperty(1))
+assertEquals(undefined, a1[2])
+assertFalse(a1.hasOwnProperty(2))
+assertEquals(at3, a1[3])
+assertFalse(a1.hasOwnProperty(3))
+
+// Now owned undefined resides at 4 and rest is shifted right by one
+// reifying at7 into a0[8] and keeping a hole at a0[7].
+
+assertEquals(undefined, a1[4])
+assertTrue(a1.hasOwnProperty(4))
+assertEquals(4, a1[5])
+assertTrue(a1.hasOwnProperty(5))
+assertEquals(5, a1[6])
+assertTrue(a1.hasOwnProperty(6))
+assertEquals(at7, a1[7])
+assertFalse(a1.hasOwnProperty(7))
+assertEquals(at7, a1[8])
+assertTrue(a1.hasOwnProperty(8))
+assertEquals(undefined, a1[9])
+assertFalse(a1.hasOwnProperty(9))
+assertEquals(9, a1[10])
+assertTrue(a1.hasOwnProperty(10))
diff --git a/deps/v8/test/mjsunit/array-length.js b/deps/v8/test/mjsunit/array-length.js
index 9731e7a3ed..967d7203ce 100644
--- a/deps/v8/test/mjsunit/array-length.js
+++ b/deps/v8/test/mjsunit/array-length.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var a = [0,1,2,3];
-a.length = 0;
+assertEquals(0, a.length = 0);
assertEquals('undefined', typeof a[0]);
assertEquals('undefined', typeof a[1]);
@@ -35,7 +35,7 @@ assertEquals('undefined', typeof a[3]);
var a = [0,1,2,3];
-a.length = 2;
+assertEquals(2, a.length = 2);
assertEquals(0, a[0]);
assertEquals(1, a[1]);
@@ -50,7 +50,7 @@ a[1000000] = 1000000;
a[2000000] = 2000000;
assertEquals(2000001, a.length);
-a.length = 0;
+assertEquals(0, a.length = 0);
assertEquals(0, a.length);
assertEquals('undefined', typeof a[0]);
assertEquals('undefined', typeof a[1000]);
@@ -65,7 +65,7 @@ a[1000000] = 1000000;
a[2000000] = 2000000;
assertEquals(2000001, a.length);
-a.length = 2000;
+assertEquals(2000, a.length = 2000);
assertEquals(2000, a.length);
assertEquals(0, a[0]);
assertEquals(1000, a[1000]);
@@ -91,7 +91,7 @@ assertEquals(Math.pow(2,31)-1, a[Math.pow(2,31)-1]);
assertEquals(Math.pow(2,32)-2, a[Math.pow(2,32)-2]);
assertEquals(Math.pow(2,32)-1, a.length);
-a.length = Math.pow(2,30)+1; // not a smi!
+assertEquals(Math.pow(2,30) + 1, a.length = Math.pow(2,30)+1); // not a smi!
assertEquals(Math.pow(2,30)+1, a.length);
assertEquals(0, a[0]);
@@ -102,10 +102,20 @@ assertEquals('undefined', typeof a[Math.pow(2,32)-2], "top");
var a = new Array();
-a.length = new Number(12);
+assertEquals(12, a.length = new Number(12));
assertEquals(12, a.length);
var o = { length: -23 };
Array.prototype.pop.apply(o);
assertEquals(4294967272, o.length);
+
+// Check case of compiled stubs.
+var a = [];
+for (var i = 0; i < 7; i++) {
+ assertEquals(3, a.length = 3);
+
+ var t = 239;
+ t = a.length = 7;
+ assertEquals(7, t);
+}
diff --git a/deps/v8/test/mjsunit/array-slice.js b/deps/v8/test/mjsunit/array-slice.js
index c993a077f6..30e9f3e9ee 100644
--- a/deps/v8/test/mjsunit/array-slice.js
+++ b/deps/v8/test/mjsunit/array-slice.js
@@ -36,6 +36,17 @@
})();
+// Check various variants of empty array's slicing.
+(function() {
+ for (var i = 0; i < 7; i++) {
+ assertEquals([], [].slice(0, 0));
+ assertEquals([], [].slice(1, 0));
+ assertEquals([], [].slice(0, 1));
+ assertEquals([], [].slice(-1, 0));
+ }
+})();
+
+
// Check various forms of arguments omission.
(function() {
var array = new Array(7);
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index 18f81fe842..887097db61 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -31,13 +31,34 @@
var array = new Array(10);
var spliced = array.splice(1, 1, 'one', 'two');
assertEquals(1, spliced.length);
- assertFalse(0 in spliced);
+ assertFalse(0 in spliced, "0 in spliced");
assertEquals(11, array.length);
- assertFalse(0 in array);
+ assertFalse(0 in array, "0 in array");
assertTrue(1 in array);
assertTrue(2 in array);
- assertFalse(3 in array);
+ assertFalse(3 in array, "3 in array");
+ }
+})();
+
+
+// Check various variants of empty array's splicing.
+(function() {
+ for (var i = 0; i < 7; i++) {
+ assertEquals([], [].splice(0, 0));
+ assertEquals([], [].splice(1, 0));
+ assertEquals([], [].splice(0, 1));
+ assertEquals([], [].splice(-1, 0));
+ }
+})();
+
+
+// Check that even if result array is empty, receiver gets sliced.
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var a = [1, 2, 3];
+ assertEquals([], a.splice(1, 0, 'a', 'b', 'c'));
+ assertEquals([1, 'a', 'b', 'c', 2, 3], a);
}
})();
@@ -249,23 +270,23 @@
assertEquals(undefined, array[7]);
// and now check hasOwnProperty
- assertFalse(array.hasOwnProperty(0));
- assertFalse(array.hasOwnProperty(1));
+ assertFalse(array.hasOwnProperty(0), "array.hasOwnProperty(0)");
+ assertFalse(array.hasOwnProperty(1), "array.hasOwnProperty(1)");
assertTrue(array.hasOwnProperty(2));
assertTrue(array.hasOwnProperty(3));
assertTrue(array.hasOwnProperty(4));
- assertFalse(array.hasOwnProperty(5));
- assertFalse(array.hasOwnProperty(6));
- assertFalse(array.hasOwnProperty(7));
+ assertFalse(array.hasOwnProperty(5), "array.hasOwnProperty(5)");
+ assertFalse(array.hasOwnProperty(6), "array.hasOwnProperty(6)");
+ assertFalse(array.hasOwnProperty(7), "array.hasOwnProperty(7)");
assertTrue(array.hasOwnProperty(8));
- assertFalse(array.hasOwnProperty(9));
+ assertFalse(array.hasOwnProperty(9), "array.hasOwnProperty(9)");
// and now check couple of indices above length.
- assertFalse(array.hasOwnProperty(10));
- assertFalse(array.hasOwnProperty(15));
- assertFalse(array.hasOwnProperty(31));
- assertFalse(array.hasOwnProperty(63));
- assertFalse(array.hasOwnProperty(2 << 32 - 1));
+ assertFalse(array.hasOwnProperty(10), "array.hasOwnProperty(10)");
+ assertFalse(array.hasOwnProperty(15), "array.hasOwnProperty(15)");
+ assertFalse(array.hasOwnProperty(31), "array.hasOwnProperty(31)");
+ assertFalse(array.hasOwnProperty(63), "array.hasOwnProperty(63)");
+  assertFalse(array.hasOwnProperty(2 << 32 - 1), "array.hasOwnProperty(2 << 32 - 1)");
}
})();
@@ -287,3 +308,13 @@
assertEquals(bigNum + 7, array.length);
}
})();
+
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var a = [7, 8, 9];
+ a.splice(0, 0, 1, 2, 3, 4, 5, 6);
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], a);
+ assertFalse(a.hasOwnProperty(10), "a.hasOwnProperty(10)");
+ assertEquals(undefined, a[10]);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/array-unshift.js b/deps/v8/test/mjsunit/array-unshift.js
index 06a78a7d9e..dbe245b8b4 100644
--- a/deps/v8/test/mjsunit/array-unshift.js
+++ b/deps/v8/test/mjsunit/array-unshift.js
@@ -130,3 +130,11 @@
assertEquals(bigNum + 7, new Array(bigNum).unshift(1, 2, 3, 4, 5, 6, 7));
}
})();
+
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var a = [6, 7, 8, 9];
+ a.unshift(1, 2, 3, 4, 5);
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], a);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/bugs/bug-618.js b/deps/v8/test/mjsunit/bugs/bug-618.js
new file mode 100644
index 0000000000..8f47440354
--- /dev/null
+++ b/deps/v8/test/mjsunit/bugs/bug-618.js
@@ -0,0 +1,45 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// When this bug is corrected move to object-define-property and add
+// additional tests for configurable in the same manner as existing tests
+// there.
+
+function C() {
+ this.x = 23;
+}
+
+// If a setter is added to the prototype chain of a simple constructor setting
+// one of the properties assigned in the constructor then this setter is
+// ignored when constructing new objects from the constructor.
+
+// This only happens if the setter is added _after_ an instance has been
+// created.
+
+assertEquals(23, new C().x);
+C.prototype.__defineSetter__('x', function(value) { this.y = 23; });
+assertEquals(void 0, new C().x);
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index 8c53910c17..a592e4c4ae 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -147,3 +147,17 @@ function testToLocaleTimeString() {
}
testToLocaleTimeString();
+
+
+// Modified test from WebKit
+// LayoutTests/fast/js/script-tests/date-utc-timeclip.js:
+
+assertEquals(Date.UTC(275760, 8, 12, 23, 59, 59, 999), 8639999999999999);
+assertEquals(Date.UTC(275760, 8, 13), 8640000000000000);
+assertTrue(isNaN(Date.UTC(275760, 8, 13, 0, 0, 0, 1)));
+assertTrue(isNaN(Date.UTC(275760, 8, 14)));
+
+assertEquals(Date.UTC(-271821, 3, 20, 0, 0, 0, 1), -8639999999999999);
+assertEquals(Date.UTC(-271821, 3, 20), -8640000000000000);
+assertTrue(isNaN(Date.UTC(-271821, 3, 19, 23, 59, 59, 999)));
+assertTrue(isNaN(Date.UTC(-271821, 3, 19)));
diff --git a/deps/v8/test/mjsunit/debug-liveedit-1.js b/deps/v8/test/mjsunit/debug-liveedit-1.js
new file mode 100644
index 0000000000..9c966a2fec
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-1.js
@@ -0,0 +1,48 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug
+
+eval("var something1 = 25; "
+ + " function ChooseAnimal() { return 'Cat'; } "
+ + " ChooseAnimal.Helper = function() { return 'Help!'; }");
+
+assertEquals("Cat", ChooseAnimal());
+
+var script = Debug.findScript(ChooseAnimal);
+
+var orig_animal = "Cat";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "Cap' + 'y' + 'bara";
+
+var change_log = new Array();
+Debug.LiveEditChangeScript(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
+
+assertEquals("Capybara", ChooseAnimal());
diff --git a/deps/v8/test/mjsunit/debug-liveedit-2.js b/deps/v8/test/mjsunit/debug-liveedit-2.js
new file mode 100644
index 0000000000..8a40dfc75c
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-2.js
@@ -0,0 +1,70 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+
+Debug = debug.Debug
+
+
+eval(
+ "function ChooseAnimal(p) {\n " +
+ " if (p == 7) {\n" + // Use p
+ " return;\n" +
+ " }\n" +
+ " return function Chooser() {\n" +
+ " return 'Cat';\n" +
+ " };\n" +
+ "}\n"
+);
+
+var old_closure = ChooseAnimal(19);
+
+assertEquals("Cat", old_closure());
+
+var script = Debug.findScript(ChooseAnimal);
+
+var orig_animal = "'Cat'";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "'Capybara' + p";
+
+// We patch innermost function "Chooser".
+// However, this does not actually patch existing "Chooser" instances,
+// because old value of parameter "p" was not saved.
+// Instead it patches ChooseAnimal.
+var change_log = new Array();
+Debug.LiveEditChangeScript(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
+print("Change log: " + JSON.stringify(change_log) + "\n");
+
+var new_closure = ChooseAnimal(19);
+// New instance of closure is patched.
+assertEquals("Capybara19", new_closure());
+
+// Old instance of closure is not patched.
+assertEquals("Cat", old_closure());
+
diff --git a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
new file mode 100644
index 0000000000..1d788be86e
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
@@ -0,0 +1,84 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug
+
+eval(
+ "function ChooseAnimal(callback) {\n " +
+ " callback();\n" +
+ " return 'Cat';\n" +
+ "}\n"
+);
+
+function Noop() {}
+var res = ChooseAnimal(Noop);
+
+assertEquals("Cat", res);
+
+var script = Debug.findScript(ChooseAnimal);
+
+var orig_animal = "'Cat'";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "'Capybara'";
+
+var got_exception = false;
+var successfully_changed = false;
+
+function Changer() {
+ // Never try the same patch again.
+ assertEquals(false, successfully_changed);
+ var change_log = new Array();
+ try {
+ Debug.LiveEditChangeScript(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
+ successfully_changed = true;
+ } catch (e) {
+ if (e instanceof Debug.LiveEditChangeScript.Failure) {
+ got_exception = true;
+ print(e);
+ } else {
+ throw e;
+ }
+ }
+ print("Change log: " + JSON.stringify(change_log) + "\n");
+}
+
+var new_res = ChooseAnimal(Changer);
+// Function must not be patched.
+assertEquals("Cat", new_res);
+
+assertEquals(true, got_exception);
+
+// This time it should succeed.
+Changer();
+
+new_res = ChooseAnimal(Noop);
+// Function must now be patched.
+assertEquals("Capybara", new_res);
+
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index af29df98cc..37cefd1a93 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -84,16 +84,16 @@ function CheckScopeChain(scopes, exec_state) {
var scope = exec_state.frame().scope(i);
assertTrue(scope.isScope());
assertEquals(scopes[i], scope.scopeType());
-
+
// Check the global object when hitting the global scope.
if (scopes[i] == debug.ScopeType.Global) {
assertEquals(this, scope.scopeObject().value());
}
}
-
+
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
-
+
// Send a scopes request and check the result.
var json;
request_json = '{"seq":0,"type":"request","command":"scopes"}'
@@ -133,7 +133,7 @@ function CheckScopeContent(content, number, exec_state) {
}
count++;
}
-
+
// 'arguments' and might be exposed in the local and closure scope. Just
// ignore this.
var scope_size = scope.scopeObject().properties().length;
@@ -156,7 +156,7 @@ function CheckScopeContent(content, number, exec_state) {
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
-
+
// Send a scope request for information on a single scope and check the
// result.
request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":'
@@ -622,7 +622,7 @@ function the_full_monty(a, b) {
with ({j:13}){
return function() {
var x = 14;
- with ({a:15}) {
+ with ({a:15}) {
with ({b:16}) {
debugger;
some_global = a;
@@ -707,7 +707,7 @@ EndTest();
BeginTest("Catch block 3");
-function catch_block_1() {
+function catch_block_3() {
// Do eval to dynamically declare a local variable so that the context's
// extension slot is initialized with JSContextExtensionObject.
eval("var y = 78;");
@@ -726,12 +726,12 @@ listener_delegate = function(exec_state) {
CheckScopeContent({e:'Exception'}, 0, exec_state);
CheckScopeContent({y:78}, 1, exec_state);
}
-catch_block_1()
+catch_block_3()
EndTest();
BeginTest("Catch block 4");
-function catch_block_2() {
+function catch_block_4() {
// Do eval to dynamically declare a local variable so that the context's
// extension slot is initialized with JSContextExtensionObject.
eval("var y = 98;");
@@ -753,7 +753,7 @@ listener_delegate = function(exec_state) {
CheckScopeContent({e:'Exception'}, 1, exec_state);
CheckScopeContent({y:98}, 2, exec_state);
}
-catch_block_2()
+catch_block_4()
EndTest();
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index effa145dee..402f90cf83 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -52,7 +52,7 @@ for (i = 0; i < scripts.length; i++) {
}
// This has to be updated if the number of native scripts change.
-assertEquals(12, named_native_count);
+assertEquals(13, named_native_count);
// If no snapshot is used, only the 'gc' extension is loaded.
// If snapshot is used, all extensions are cached in the snapshot.
assertTrue(extension_count == 1 || extension_count == 5);
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index e2f601eb9f..375038816a 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -147,7 +147,19 @@ var knownProblems = {
"DeclareGlobals": true,
"PromoteScheduledException": true,
- "DeleteHandleScopeExtensions": true
+ "DeleteHandleScopeExtensions": true,
+
+ // That can only be invoked on Array.prototype.
+ "FinishArrayPrototypeSetup": true,
+
+ // LiveEdit feature is under development currently and has fragile input.
+ "LiveEditFindSharedFunctionInfosForScript": true,
+ "LiveEditGatherCompileInfo": true,
+ "LiveEditReplaceScript": true,
+ "LiveEditReplaceFunctionCode": true,
+ "LiveEditRelinkFunctionToScript": true,
+ "LiveEditPatchFunctionPositions": true,
+ "LiveEditCheckStackActivations": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/math-sqrt.js b/deps/v8/test/mjsunit/math-sqrt.js
new file mode 100644
index 0000000000..ae29b74381
--- /dev/null
+++ b/deps/v8/test/mjsunit/math-sqrt.js
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the special cases specified by ES 15.8.2.17
+
+// Simple sanity check
+assertEquals(2, Math.sqrt(4));
+assertEquals(0.1, Math.sqrt(0.01));
+
+// Spec tests
+assertEquals(NaN, Math.sqrt(NaN));
+assertEquals(NaN, Math.sqrt(-1));
+assertEquals(+0, Math.sqrt(+0));
+assertEquals(-0, Math.sqrt(-0));
+assertEquals(Infinity, Math.sqrt(Infinity));
+// -Infinity is smaller than 0 so it should return NaN
+assertEquals(NaN, Math.sqrt(-Infinity));
+
+
+
diff --git a/deps/v8/test/mjsunit/regress/regress-634.js b/deps/v8/test/mjsunit/regress/regress-634.js
new file mode 100644
index 0000000000..b68e843740
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-634.js
@@ -0,0 +1,32 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+for (var i = 0; i < 1000000; i++) {
+ a = new Array(0);
+ assertEquals(0, a.length);
+ assertEquals(0, a.length);
+}
diff --git a/deps/v8/src/usage-analyzer.h b/deps/v8/test/mjsunit/regress/regress-636.js
index 1b0ea4a0fb..8e0196d6f8 100644
--- a/deps/v8/src/usage-analyzer.h
+++ b/deps/v8/test/mjsunit/regress/regress-636.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,16 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+function test() {
+ var i, result = "";
+ var value = parseFloat(5.5);
+ value = Math.abs(1025);
+ for(i = 12; --i; result = ( value % 2 ) + result, value >>= 1);
+ return result;
+};
-namespace v8 {
-namespace internal {
-
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
-
-} } // namespace v8::internal
-
-#endif // V8_USAGE_ANALYZER_H_
+assertEquals("10000000001", test());
diff --git a/deps/v8/test/mjsunit/string-charat.js b/deps/v8/test/mjsunit/string-charat.js
index 8ec8f1e08f..d1989dfd73 100644
--- a/deps/v8/test/mjsunit/string-charat.js
+++ b/deps/v8/test/mjsunit/string-charat.js
@@ -51,3 +51,16 @@ assertEquals(116, s.charCodeAt(NaN));
assertTrue(isNaN(s.charCodeAt(-1)));
assertTrue(isNaN(s.charCodeAt(4)));
+// Make sure enough of the one-char string cache is filled.
+var alpha = ['@'];
+for (var i = 1; i < 128; i++) {
+ var c = String.fromCharCode(i);
+ alpha[i] = c.charAt(0);
+}
+var alphaStr = alpha.join("");
+
+// Now test chars.
+for (var i = 1; i < 128; i++) {
+ assertEquals(alpha[i], alphaStr.charAt(i));
+ assertEquals(String.fromCharCode(i), alphaStr.charAt(i));
+}
diff --git a/deps/v8/test/mjsunit/string-index.js b/deps/v8/test/mjsunit/string-index.js
index 2256286eec..c6b26a85ee 100644
--- a/deps/v8/test/mjsunit/string-index.js
+++ b/deps/v8/test/mjsunit/string-index.js
@@ -152,3 +152,17 @@ assertEquals('o', S2);
var s2 = (s[-2] = 't');
assertEquals('undefined', typeof(s[-2]));
assertEquals('t', s2);
+
+// Make sure enough of the one-char string cache is filled.
+var alpha = ['@'];
+for (var i = 1; i < 128; i++) {
+ var c = String.fromCharCode(i);
+ alpha[i] = c[0];
+}
+var alphaStr = alpha.join("");
+
+// Now test chars.
+for (var i = 1; i < 128; i++) {
+ assertEquals(alpha[i], alphaStr[i]);
+ assertEquals(String.fromCharCode(i), alphaStr[i]);
+}
diff --git a/deps/v8/test/mjsunit/string-split-cache.js b/deps/v8/test/mjsunit/string-split-cache.js
new file mode 100644
index 0000000000..37c550f471
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-split-cache.js
@@ -0,0 +1,40 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+var str = "abcdef";
+
+// Get a prefix of the string into the one-char string cache.
+assertEquals("a", str[0]);
+assertEquals("b", str[1]);
+assertEquals("c", str[2]);
+
+// Splitting by "" calls runtime StringToArray function that uses the
+// cache. So this test hits a case where only a prefix is cached.
+var array = str.split("");
+var expected = ["a", "b", "c", "d", "e", "f"];
+assertArrayEquals(expected, array);
diff --git a/deps/v8/test/mjsunit/undeletable-functions.js b/deps/v8/test/mjsunit/undeletable-functions.js
index 86a74263e3..04fd06068d 100644
--- a/deps/v8/test/mjsunit/undeletable-functions.js
+++ b/deps/v8/test/mjsunit/undeletable-functions.js
@@ -39,6 +39,13 @@ array = [
"every", "map", "indexOf", "lastIndexOf", "reduce", "reduceRight"];
CheckJSCSemantics(Array.prototype, array, "Array prototype");
+var old_Array_prototype = Array.prototype;
+var new_Array_prototype = {};
+for (var i = 0; i < 7; i++) {
+ Array.prototype = new_Array_prototype;
+ assertEquals(old_Array_prototype, Array.prototype);
+}
+
array = [
"toString", "toDateString", "toTimeString", "toLocaleString",
"toLocaleDateString", "toLocaleTimeString", "valueOf", "getTime",
@@ -79,6 +86,13 @@ array = [
"__lookupGetter__", "__defineSetter__", "__lookupSetter__"];
CheckEcmaSemantics(Object.prototype, array, "Object prototype");
+var old_Object_prototype = Object.prototype;
+var new_Object_prototype = {};
+for (var i = 0; i < 7; i++) {
+ Object.prototype = new_Object_prototype;
+ assertEquals(old_Object_prototype, Object.prototype);
+}
+
array = [
"toString", "valueOf", "toJSON"];
CheckEcmaSemantics(Boolean.prototype, array, "Boolean prototype");
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index efba10b003..59ec566d57 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -27,7 +27,6 @@
{
'variables': {
- 'chromium_code': 1,
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'target_arch%': 'ia32',
@@ -377,8 +376,6 @@
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
- '../../src/usage-analyzer.cc',
- '../../src/usage-analyzer.h',
'../../src/utils.cc',
'../../src/utils.h',
'../../src/v8-counters.cc',
@@ -391,9 +388,9 @@
'../../src/variables.h',
'../../src/version.cc',
'../../src/version.h',
- '../../src/virtual-frame-inl.h',
- '../../src/virtual-frame.h',
+ '../../src/virtual-frame-inl.h',
'../../src/virtual-frame.cc',
+ '../../src/virtual-frame.h',
'../../src/zone-inl.h',
'../../src/zone.cc',
'../../src/zone.h',
@@ -563,6 +560,7 @@
'../../src/messages.js',
'../../src/apinatives.js',
'../../src/debug-delay.js',
+ '../../src/liveedit-delay.js',
'../../src/mirror-delay.js',
'../../src/date-delay.js',
'../../src/json-delay.js',
diff --git a/deps/v8/tools/visual_studio/js2c.cmd b/deps/v8/tools/visual_studio/js2c.cmd
index df5293ba51..54b1bfb552 100644
--- a/deps/v8/tools/visual_studio/js2c.cmd
+++ b/deps/v8/tools/visual_studio/js2c.cmd
@@ -3,4 +3,4 @@ set SOURCE_DIR=%1
set TARGET_DIR=%2
set PYTHON="..\..\..\third_party\python_24\python.exe"
if not exist %PYTHON% set PYTHON=python.exe
-%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-delay.js %SOURCE_DIR%\mirror-delay.js %SOURCE_DIR%\date-delay.js %SOURCE_DIR%\regexp-delay.js %SOURCE_DIR%\json-delay.js
+%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-delay.js %SOURCE_DIR%\liveedit-delay.js %SOURCE_DIR%\mirror-delay.js %SOURCE_DIR%\date-delay.js %SOURCE_DIR%\regexp-delay.js %SOURCE_DIR%\json-delay.js
diff --git a/deps/v8/tools/visual_studio/v8.vcproj b/deps/v8/tools/visual_studio/v8.vcproj
index 47ba8c1f44..3122c6d598 100644
--- a/deps/v8/tools/visual_studio/v8.vcproj
+++ b/deps/v8/tools/visual_studio/v8.vcproj
@@ -143,6 +143,10 @@
>
</File>
<File
+ RelativePath="..\..\src\liveedit-delay.js"
+ >
+ </File>
+ <File
RelativePath="..\..\src\macros.py"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_arm.vcproj b/deps/v8/tools/visual_studio/v8_arm.vcproj
index d21affe9d2..cb7519bdc7 100644
--- a/deps/v8/tools/visual_studio/v8_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_arm.vcproj
@@ -143,6 +143,10 @@
>
</File>
<File
+ RelativePath="..\..\src\liveedit-delay.js"
+ >
+ </File>
+ <File
RelativePath="..\..\src\macros.py"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index 6238044611..1a7e14c3a5 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -881,14 +881,6 @@
>
</File>
<File
- RelativePath="..\..\src\usage-analyzer.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\usage-analyzer.h"
- >
- </File>
- <File
RelativePath="..\..\src\utils.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index 7f97da674e..346c5eb19c 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -893,14 +893,6 @@
>
</File>
<File
- RelativePath="..\..\src\usage-analyzer.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\usage-analyzer.h"
- >
- </File>
- <File
RelativePath="..\..\src\utils.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index b7ad20d5d9..120dd190d7 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -882,14 +882,6 @@
>
</File>
<File
- RelativePath="..\..\src\usage-analyzer.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\usage-analyzer.h"
- >
- </File>
- <File
RelativePath="..\..\src\utils.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_x64.vcproj b/deps/v8/tools/visual_studio/v8_x64.vcproj
index cbf88c918d..a476d7dca7 100644
--- a/deps/v8/tools/visual_studio/v8_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_x64.vcproj
@@ -143,6 +143,10 @@
>
</File>
<File
+ RelativePath="..\..\src\liveedit-delay.js"
+ >
+ </File>
+ <File
RelativePath="..\..\src\macros.py"
>
</File>