author    isaacs <i@izs.me>  2012-03-28 19:51:38 -0700
committer isaacs <i@izs.me>  2012-03-28 19:51:38 -0700
commit    4b64542fe09477fc5c70e974eb1a78cdce755eb7 (patch)
tree      b4d4cdfd5b07efbdae51098b422fde7844ff4715 /deps
parent    8a15147bc53849417f8737dd873877d497867c9f (diff)
download  node-new-4b64542fe09477fc5c70e974eb1a78cdce755eb7.tar.gz
Upgrade V8 to 3.9.24.6
Diffstat (limited to 'deps')
-rw-r--r-- deps/v8/AUTHORS | 1
-rw-r--r-- deps/v8/ChangeLog | 81
-rw-r--r-- deps/v8/SConstruct | 35
-rw-r--r-- deps/v8/benchmarks/README.txt | 7
-rw-r--r-- deps/v8/benchmarks/revisions.html | 4
-rw-r--r-- deps/v8/benchmarks/run.html | 20
-rw-r--r-- deps/v8/build/common.gypi | 12
-rw-r--r-- deps/v8/build/mipsu.gypi | 3
-rw-r--r-- deps/v8/include/v8.h | 8
-rwxr-xr-x deps/v8/src/SConscript | 1
-rw-r--r-- deps/v8/src/api.cc | 191
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h | 12
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 21
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 70
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 64
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc | 7
-rw-r--r-- deps/v8/src/arm/deoptimizer-arm.cc | 30
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 209
-rw-r--r-- deps/v8/src/arm/lithium-arm.cc | 8
-rw-r--r-- deps/v8/src/arm/lithium-arm.h | 17
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.cc | 77
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 4
-rw-r--r-- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 2
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 16
-rw-r--r-- deps/v8/src/assembler.cc | 54
-rw-r--r-- deps/v8/src/assembler.h | 15
-rw-r--r-- deps/v8/src/ast.cc | 12
-rw-r--r-- deps/v8/src/ast.h | 15
-rw-r--r-- deps/v8/src/builtins.cc | 45
-rw-r--r-- deps/v8/src/codegen.cc | 13
-rw-r--r-- deps/v8/src/codegen.h | 6
-rw-r--r-- deps/v8/src/compiler.cc | 20
-rw-r--r-- deps/v8/src/d8.cc | 8
-rw-r--r-- deps/v8/src/debug-agent.cc | 29
-rw-r--r-- deps/v8/src/debug.cc | 25
-rw-r--r-- deps/v8/src/debug.h | 1
-rw-r--r-- deps/v8/src/deoptimizer.cc | 3
-rw-r--r-- deps/v8/src/deoptimizer.h | 9
-rw-r--r-- deps/v8/src/elements.cc | 259
-rw-r--r-- deps/v8/src/elements.h | 15
-rw-r--r-- deps/v8/src/execution.cc | 3
-rw-r--r-- deps/v8/src/factory.cc | 5
-rw-r--r-- deps/v8/src/flag-definitions.h | 55
-rw-r--r-- deps/v8/src/flags.cc | 15
-rw-r--r-- deps/v8/src/frames.cc | 9
-rw-r--r-- deps/v8/src/full-codegen.cc | 3
-rw-r--r-- deps/v8/src/full-codegen.h | 24
-rw-r--r-- deps/v8/src/gdb-jit.cc | 9
-rw-r--r-- deps/v8/src/gdb-jit.h | 3
-rw-r--r-- deps/v8/src/globals.h | 3
-rw-r--r-- deps/v8/src/handles.cc | 158
-rw-r--r-- deps/v8/src/handles.h | 2
-rw-r--r-- deps/v8/src/hashmap.h | 73
-rw-r--r-- deps/v8/src/heap.cc | 199
-rw-r--r-- deps/v8/src/heap.h | 44
-rw-r--r-- deps/v8/src/hydrogen-instructions.cc | 49
-rw-r--r-- deps/v8/src/hydrogen-instructions.h | 57
-rw-r--r-- deps/v8/src/hydrogen.cc | 214
-rw-r--r-- deps/v8/src/hydrogen.h | 33
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 2
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 36
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 41
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 38
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 43
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 59
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.cc | 108
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.h | 6
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.cc | 20
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.h | 30
-rw-r--r-- deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 2
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 15
-rw-r--r-- deps/v8/src/ic.cc | 11
-rw-r--r-- deps/v8/src/incremental-marking.cc | 15
-rw-r--r-- deps/v8/src/isolate-inl.h | 10
-rw-r--r-- deps/v8/src/isolate.cc | 137
-rw-r--r-- deps/v8/src/isolate.h | 40
-rw-r--r-- deps/v8/src/jsregexp.cc | 28
-rw-r--r-- deps/v8/src/lazy-instance.h | 216
-rw-r--r-- deps/v8/src/lithium-allocator.cc | 23
-rw-r--r-- deps/v8/src/lithium.cc | 25
-rw-r--r-- deps/v8/src/lithium.h | 14
-rw-r--r-- deps/v8/src/log.cc | 26
-rw-r--r-- deps/v8/src/log.h | 1
-rw-r--r-- deps/v8/src/mark-compact-inl.h | 9
-rw-r--r-- deps/v8/src/mark-compact.cc | 6
-rw-r--r-- deps/v8/src/mark-compact.h | 3
-rw-r--r-- deps/v8/src/mips/assembler-mips-inl.h | 28
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 23
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 14
-rw-r--r-- deps/v8/src/mips/builtins-mips.cc | 8
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 323
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc | 7
-rw-r--r-- deps/v8/src/mips/constants-mips.h | 15
-rw-r--r-- deps/v8/src/mips/debug-mips.cc | 4
-rw-r--r-- deps/v8/src/mips/deoptimizer-mips.cc | 8
-rw-r--r-- deps/v8/src/mips/disasm-mips.cc | 18
-rw-r--r-- deps/v8/src/mips/full-codegen-mips.cc | 264
-rw-r--r-- deps/v8/src/mips/ic-mips.cc | 47
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.cc | 81
-rw-r--r-- deps/v8/src/mips/lithium-mips.cc | 15
-rw-r--r-- deps/v8/src/mips/lithium-mips.h | 35
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 283
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 81
-rw-r--r-- deps/v8/src/mips/regexp-macro-assembler-mips.cc | 16
-rw-r--r-- deps/v8/src/mips/simulator-mips.h | 8
-rw-r--r-- deps/v8/src/mips/stub-cache-mips.cc | 53
-rw-r--r-- deps/v8/src/mirror-debugger.js | 5
-rw-r--r-- deps/v8/src/objects-debug.cc | 3
-rw-r--r-- deps/v8/src/objects-inl.h | 131
-rw-r--r-- deps/v8/src/objects.cc | 234
-rw-r--r-- deps/v8/src/objects.h | 84
-rw-r--r-- deps/v8/src/once.cc | 77
-rw-r--r-- deps/v8/src/once.h | 123
-rw-r--r-- deps/v8/src/parser.cc | 58
-rw-r--r-- deps/v8/src/parser.h | 10
-rw-r--r-- deps/v8/src/platform-cygwin.cc | 8
-rw-r--r-- deps/v8/src/platform-freebsd.cc | 8
-rw-r--r-- deps/v8/src/platform-linux.cc | 11
-rw-r--r-- deps/v8/src/platform-macos.cc | 8
-rw-r--r-- deps/v8/src/platform-openbsd.cc | 8
-rw-r--r-- deps/v8/src/platform-posix.cc | 38
-rw-r--r-- deps/v8/src/platform-solaris.cc | 8
-rw-r--r-- deps/v8/src/platform-win32.cc | 67
-rw-r--r-- deps/v8/src/platform.h | 44
-rw-r--r-- deps/v8/src/preparse-data.h | 6
-rw-r--r-- deps/v8/src/preparser-api.cc | 30
-rw-r--r-- deps/v8/src/preparser.cc | 8
-rw-r--r-- deps/v8/src/preparser.h | 2
-rw-r--r-- deps/v8/src/profile-generator-inl.h | 9
-rw-r--r-- deps/v8/src/profile-generator.cc | 13
-rw-r--r-- deps/v8/src/profile-generator.h | 7
-rw-r--r-- deps/v8/src/regexp.js | 33
-rw-r--r-- deps/v8/src/runtime-profiler.cc | 26
-rw-r--r-- deps/v8/src/runtime-profiler.h | 1
-rw-r--r-- deps/v8/src/runtime.cc | 134
-rw-r--r-- deps/v8/src/runtime.h | 5
-rw-r--r-- deps/v8/src/scanner-character-streams.cc | 86
-rw-r--r-- deps/v8/src/scanner-character-streams.h | 34
-rwxr-xr-x deps/v8/src/scanner.cc | 2
-rw-r--r-- deps/v8/src/scanner.h | 79
-rw-r--r-- deps/v8/src/serialize.cc | 49
-rw-r--r-- deps/v8/src/serialize.h | 2
-rw-r--r-- deps/v8/src/spaces.cc | 64
-rw-r--r-- deps/v8/src/spaces.h | 7
-rw-r--r-- deps/v8/src/string.js | 19
-rw-r--r-- deps/v8/src/type-info.cc | 31
-rw-r--r-- deps/v8/src/type-info.h | 5
-rw-r--r-- deps/v8/src/unicode-inl.h | 15
-rw-r--r-- deps/v8/src/unicode.cc | 11
-rw-r--r-- deps/v8/src/unicode.h | 49
-rw-r--r-- deps/v8/src/v8.cc | 23
-rw-r--r-- deps/v8/src/v8.h | 1
-rw-r--r-- deps/v8/src/version.cc | 4
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h | 4
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 2
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 59
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 33
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 29
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 6
-rw-r--r-- deps/v8/src/x64/full-codegen-x64.cc | 39
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.cc | 73
-rw-r--r-- deps/v8/src/x64/lithium-x64.cc | 8
-rw-r--r-- deps/v8/src/x64/lithium-x64.h | 17
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 14
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 2
-rw-r--r-- deps/v8/src/x64/regexp-macro-assembler-x64.cc | 2
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 15
-rw-r--r-- deps/v8/src/zone.h | 2
-rw-r--r-- deps/v8/test/cctest/cctest.status | 4
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 237
-rw-r--r-- deps/v8/test/cctest/test-assembler-mips.cc | 27
-rw-r--r-- deps/v8/test/cctest/test-disasm-mips.cc | 104
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 53
-rw-r--r-- deps/v8/test/cctest/test-log-stack-tracer.cc | 1
-rwxr-xr-x deps/v8/test/cctest/test-parsing.cc | 223
-rw-r--r-- deps/v8/test/mjsunit/compiler/inline-arguments.js | 90
-rw-r--r-- deps/v8/test/mjsunit/debug-set-script-source.js | 64
-rw-r--r-- deps/v8/test/mjsunit/debug-stepin-function-call.js | 17
-rw-r--r-- deps/v8/test/mjsunit/getter-in-value-prototype.js | 2
-rw-r--r-- deps/v8/test/mjsunit/harmony/proxies.js | 19
-rw-r--r-- deps/v8/test/mjsunit/pixel-array-rounding.js | 44
-rw-r--r-- deps/v8/test/mjsunit/regexp.js | 6
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-115452.js | 48
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-117794.js | 57
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-119925.js | 34
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1624-strict.js | 140
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1624.js | 139
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1973.js | 52
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-119926.js | 33
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-sqrt.js | 47
-rw-r--r-- deps/v8/test/mozilla/mozilla.status | 3
-rw-r--r-- deps/v8/test/test262/test262.status | 16
-rw-r--r-- deps/v8/tools/check-static-initializers.sh | 54
-rw-r--r-- deps/v8/tools/common-includes.sh | 9
-rw-r--r-- deps/v8/tools/gyp/v8.gyp | 18
-rw-r--r-- deps/v8/tools/merge-to-branch.sh | 36
-rwxr-xr-x deps/v8/tools/push-to-trunk.sh | 9
-rwxr-xr-x deps/v8/tools/test-wrapper-gypbuild.py | 4
-rwxr-xr-x deps/v8/tools/test.py | 6
199 files changed, 6023 insertions, 2240 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 869be2b74b..dfefad129f 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -51,3 +51,4 @@ Tobias Burnus <burnus@net-b.de>
Vlad Burlik <vladbph@gmail.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>
+Zhongping Wang <kewpie.w.zp@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 9ba13624ae..2240ec0e68 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,84 @@
+2012-03-23: Version 3.9.24
+
+ Activated count-based profiler for ARM.
+
+ Fixed use of proxies as f.prototype properties. (issue 2021)
+
+ Enabled snapshots on MIPS.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-03-21: Version 3.9.23
+
+ Use correct arguments adaptation environment when inlining function
+ containing arguments. (Issue 2014)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-03-20: Version 3.9.22
+
+ Enabled count-based profiler by default.
+
+ Implemented a hash based look-up to speed up address checks
+ in large object space (issue 853).
+
+ Performance and stability improvements on all platforms.
+
+
+2012-03-19: Version 3.9.21
+
+ Fixed push-to-trunk script (and re-push).
+
+ Added API call that identifies strings that are guaranteed only to
+ contain ASCII characters.
+
+
+2012-03-19: Version 3.9.20
+
+ Fixed declarations escaping global strict eval. (Issue 1624)
+
+ Fixed wrapping of receiver for non-strict callbacks. (Issue 1973)
+
+ Fixed function declarations overwriting read-only global properties.
+ (Chromium issue 115452)
+
+ Fixed --use-strict flag in combination with --harmony[-scoping].
+
+ Debugger: naive implementation of "step into Function.prototype.bind".
+
+ Debugger: added ability to set script source from within OnBeforeCompile
+
+ Added flag to always call DebugBreak on abort.
+
+ Re-enabled constructor inlining and inline === comparison with boolean
+ constants. (Issue 2009)
+
+ Don't use an explicit s0 in ClampDoubleToUint8. (Issue 2004)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-03-14: Version 3.9.19
+
+ Ensure there is a smi check of the receiver for global load and call
+ ICs (Chromium issue 117794).
+
+ Performance and stability improvements on all platforms.
+
+
+2012-03-13: Version 3.9.18
+
+ Ensure consistency of Math.sqrt on Intel platforms.
+
+ Remove static initializers in v8. (issue 1859)
+
+ Add explicit dependency on v8_base in the GYP-based build.
+
+ Performance and stability improvements on all platforms.
+
+
2012-03-12: Version 3.9.17
Fixed VFP detection through compiler defines. (issue 1996)
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index bfa53a7af4..34d0efc5ff 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -185,6 +185,9 @@ LIBRARY_FLAGS = {
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
+ 'mips_arch_variant:loongson': {
+ 'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
+ },
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
@@ -194,6 +197,9 @@ LIBRARY_FLAGS = {
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
+ 'mips_arch_variant:loongson': {
+ 'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
+ },
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
@@ -212,9 +218,12 @@ LIBRARY_FLAGS = {
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
+ 'fpu:on': {
+ 'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
+ }
},
'mipsabi:hardfloat': {
- 'CPPDEFINES': ['__mips_hard_float=1'],
+ 'CPPDEFINES': ['__mips_hard_float=1', 'CAN_USE_FPU_INSTRUCTIONS'],
}
},
'arch:x64': {
@@ -545,6 +554,9 @@ SAMPLE_FLAGS = {
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
+ 'mips_arch_variant:loongson': {
+ 'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
+ },
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
@@ -554,6 +566,9 @@ SAMPLE_FLAGS = {
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
+ 'mips_arch_variant:loongson': {
+ 'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
+ },
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
@@ -563,7 +578,10 @@ SAMPLE_FLAGS = {
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
- 'LINKFLAGS': ['-mhard-float']
+ 'LINKFLAGS': ['-mhard-float'],
+ 'fpu:on': {
+ 'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
+ }
}
}
},
@@ -697,6 +715,9 @@ PREPARSER_FLAGS = {
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
+ 'mips_arch_variant:loongson': {
+ 'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
+ },
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
@@ -706,6 +727,9 @@ PREPARSER_FLAGS = {
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
+ 'mips_arch_variant:loongson': {
+ 'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
+ },
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
@@ -1114,7 +1138,7 @@ SIMPLE_OPTIONS = {
'help': 'generate calling conventiont according to selected mips ABI'
},
'mips_arch_variant': {
- 'values': ['mips32r2', 'mips32r1'],
+ 'values': ['mips32r2', 'mips32r1', 'loongson'],
'default': 'mips32r2',
'help': 'mips variant'
},
@@ -1128,6 +1152,11 @@ SIMPLE_OPTIONS = {
'default': 'on',
'help': 'use vfp3 instructions when building the snapshot [Arm only]'
},
+ 'fpu': {
+ 'values': ['on', 'off'],
+ 'default': 'on',
+ 'help': 'use fpu instructions when building the snapshot [MIPS only]'
+ },
}
diff --git a/deps/v8/benchmarks/README.txt b/deps/v8/benchmarks/README.txt
index 6676f37556..59f76ffc81 100644
--- a/deps/v8/benchmarks/README.txt
+++ b/deps/v8/benchmarks/README.txt
@@ -77,3 +77,10 @@ input strings.
Furthermore, the benchmark runner was changed to run the benchmarks
for at least a few times to stabilize the reported numbers on slower
machines.
+
+
+Changes from Version 6 to Version 7
+===================================
+
+Added the Navier-Stokes benchmark, a 2D differential equation solver
+that stresses arithmetic computations on double arrays.
diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html
index 6ff75be1e1..3ce9889592 100644
--- a/deps/v8/benchmarks/revisions.html
+++ b/deps/v8/benchmarks/revisions.html
@@ -19,6 +19,10 @@ not comparable unless both results are run with the same revision of
the benchmark suite.
</p>
+<div class="subtitle"><h3>Version 7 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v7/run.html">link</a>)</h3></div>
+
+<p>This version includes the new Navier-Stokes benchmark, a 2D differential
+ equation solver that stresses arithmetic computations on double arrays.</p>
<div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.html">link</a>)</h3></div>
diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html
index 8786d1fb0c..f1d14c1887 100644
--- a/deps/v8/benchmarks/run.html
+++ b/deps/v8/benchmarks/run.html
@@ -53,16 +53,16 @@ function Run() {
BenchmarkSuite.RunSuites({ NotifyStep: ShowProgress,
NotifyError: AddError,
NotifyResult: AddResult,
- NotifyScore: AddScore });
+ NotifyScore: AddScore });
}
function ShowWarningIfObsolete() {
- // If anything goes wrong we will just catch the exception and no
+ // If anything goes wrong we will just catch the exception and no
// warning is shown, i.e., no harm is done.
try {
var xmlhttp;
- var next_version = parseInt(BenchmarkSuite.version) + 1;
- var next_version_url = "../v" + next_version + "/run.html";
+ var next_version = parseInt(BenchmarkSuite.version) + 1;
+ var next_version_url = "../v" + next_version + "/run.html";
if (window.XMLHttpRequest) {
xmlhttp = new window.XMLHttpRequest();
} else if (window.ActiveXObject) {
@@ -76,7 +76,7 @@ function ShowWarningIfObsolete() {
};
xmlhttp.send(null);
} catch(e) {
- // Ignore exception if check for next version fails.
+ // Ignore exception if check for next version fails.
// Hence no warning is displayed.
}
}
@@ -84,7 +84,7 @@ function ShowWarningIfObsolete() {
function Load() {
var version = BenchmarkSuite.version;
document.getElementById("version").innerHTML = version;
- ShowWarningIfObsolete();
+ ShowWarningIfObsolete();
setTimeout(Run, 200);
}
</script>
@@ -92,11 +92,11 @@ function Load() {
<body onload="Load()">
<div>
<div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
- <div class="warning" id="obsolete">
+ <div class="warning" id="obsolete">
Warning! This is not the latest version of the V8 benchmark
-suite. Consider running the
+suite. Consider running the
<a href="http://v8.googlecode.com/svn/data/benchmarks/current/run.html">
-latest version</a>.
+latest version</a>.
</div>
<table>
<tr>
@@ -118,7 +118,7 @@ higher scores means better performance: <em>Bigger is better!</em>
(<i>1761 lines</i>).
</li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>394 lines</i>).</li>
-<li><b>NavierStokes (beta)</b><br>Solves NavierStokes equations in 2D, heavily manipulating double precision arrays. Based on Oliver Hunt's code (<i>396 lines</i>).</li>
+<li><b>NavierStokes</b><br>Solves NavierStokes equations in 2D, heavily manipulating double precision arrays. Based on Oliver Hunt's code (<i>387 lines</i>).</li>
</ul>
<p>
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 6e1cbea7a1..5c0c323412 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -62,6 +62,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
+ # Default arch variant for MIPS.
+ 'mips_arch_variant%': 'mips32r2',
+
'v8_enable_debugger_support%': 1,
'v8_enable_disassembler%': 0,
@@ -184,6 +187,9 @@
}],
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'cflags': ['-mips3', '-Wa,-mips3'],
}, {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
@@ -209,6 +215,9 @@
['mips_arch_variant=="mips32r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
+ ['mips_arch_variant=="loongson"', {
+ 'defines': ['_MIPS_ARCH_LOONGSON',],
+ }],
# The MIPS assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64"', {
@@ -305,7 +314,7 @@
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
- 'cflags': [ '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@@ -352,6 +361,7 @@
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
diff --git a/deps/v8/build/mipsu.gypi b/deps/v8/build/mipsu.gypi
index 306f105dbd..637ff841e4 100644
--- a/deps/v8/build/mipsu.gypi
+++ b/deps/v8/build/mipsu.gypi
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -29,6 +29,5 @@
'variables': {
'target_arch': 'ia32',
'v8_target_arch': 'mips',
- 'mips_arch_variant': 'mips32r2',
},
}
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e4037b9ac6..33179f5bf0 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1021,6 +1021,14 @@ class String : public Primitive {
V8EXPORT int Utf8Length() const;
/**
+ * A fast conservative check for non-ASCII characters. May
+ * return true even for ASCII strings, but if it returns
+ * false you can be sure that all characters are in the range
+ * 0-127.
+ */
+ V8EXPORT bool MayContainNonAscii() const;
+
+ /**
* Write the contents of the string to an external buffer.
* If no arguments are given, expects the buffer to be large
* enough to hold the entire string and NULL terminator. Copies
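[editor's note] The new MayContainNonAscii() entry point is conservative by design: it may return true even for a pure-ASCII string, but a false answer guarantees every character is in the range 0-127, so an embedder can size a UTF-8 buffer without scanning. A minimal embedder-side sketch (not part of the patch; Length(), Utf8Length() and the Handle types are the existing v8.h API):

#include <v8.h>

// Bytes needed to hold str as NUL-terminated UTF-8. When the conservative
// check says "ASCII only", one byte per character suffices and the full
// Utf8Length() scan is skipped.
int Utf8BufferSize(v8::Handle<v8::String> str) {
  if (!str->MayContainNonAscii()) return str->Length() + 1;
  return str->Utf8Length() + 1;
}
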
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index fde7a80765..0d0b5357d5 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -101,6 +101,7 @@ SOURCES = {
objects.cc
objects-printer.cc
objects-visiting.cc
+ once.cc
parser.cc
preparser.cc
preparse-data.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index d8c7ba0eaa..49a026be20 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -1430,7 +1430,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
- i::Utf8ToUC16CharacterStream stream(
+ i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
}
@@ -1439,11 +1439,11 @@ ScriptData* ScriptData::PreCompile(const char* input, int length) {
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
if (str->IsExternalTwoByteString()) {
- i::ExternalTwoByteStringUC16CharacterStream stream(
+ i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
} else {
- i::GenericStringUC16CharacterStream stream(str, 0, str->length());
+ i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
}
}
@@ -3064,8 +3064,11 @@ bool Object::SetAccessor(Handle<String> name,
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
+ bool fast = Utils::OpenHandle(this)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
- return !result.is_null() && !result->IsUndefined();
+ if (result.is_null() || result->IsUndefined()) return false;
+ if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
+ return true;
}
@@ -3690,7 +3693,104 @@ int String::Length() const {
int String::Utf8Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
- return str->Utf8Length();
+ return i::Utf8Length(str);
+}
+
+
+// Will fail with a negative answer if the recursion depth is too high.
+static int RecursivelySerializeToUtf8(i::String* string,
+ char* buffer,
+ int start,
+ int end,
+ int recursion_budget,
+ int32_t previous_character,
+ int32_t* last_character) {
+ int utf8_bytes = 0;
+ while (true) {
+ if (string->IsAsciiRepresentation()) {
+ i::String::WriteToFlat(string, buffer, start, end);
+ *last_character = unibrow::Utf16::kNoPreviousCharacter;
+ return utf8_bytes + end - start;
+ }
+ switch (i::StringShape(string).representation_tag()) {
+ case i::kExternalStringTag: {
+ const uint16_t* data = i::ExternalTwoByteString::cast(string)->
+ ExternalTwoByteStringGetData(0);
+ char* current = buffer;
+ for (int i = start; i < end; i++) {
+ uint16_t character = data[i];
+ current +=
+ unibrow::Utf8::Encode(current, character, previous_character);
+ previous_character = character;
+ }
+ *last_character = previous_character;
+ return static_cast<int>(utf8_bytes + current - buffer);
+ }
+ case i::kSeqStringTag: {
+ const uint16_t* data =
+ i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
+ char* current = buffer;
+ for (int i = start; i < end; i++) {
+ uint16_t character = data[i];
+ current +=
+ unibrow::Utf8::Encode(current, character, previous_character);
+ previous_character = character;
+ }
+ *last_character = previous_character;
+ return static_cast<int>(utf8_bytes + current - buffer);
+ }
+ case i::kSlicedStringTag: {
+ i::SlicedString* slice = i::SlicedString::cast(string);
+ unsigned offset = slice->offset();
+ string = slice->parent();
+ start += offset;
+ end += offset;
+ continue;
+ }
+ case i::kConsStringTag: {
+ i::ConsString* cons_string = i::ConsString::cast(string);
+ i::String* first = cons_string->first();
+ int boundary = first->length();
+ if (start >= boundary) {
+ // Only need RHS.
+ string = cons_string->second();
+ start -= boundary;
+ end -= boundary;
+ continue;
+ } else if (end <= boundary) {
+ // Only need LHS.
+ string = first;
+ } else {
+ if (recursion_budget == 0) return -1;
+ int extra_utf8_bytes =
+ RecursivelySerializeToUtf8(first,
+ buffer,
+ start,
+ boundary,
+ recursion_budget - 1,
+ previous_character,
+ &previous_character);
+ if (extra_utf8_bytes < 0) return extra_utf8_bytes;
+ buffer += extra_utf8_bytes;
+ utf8_bytes += extra_utf8_bytes;
+ string = cons_string->second();
+ start = 0;
+ end -= boundary;
+ }
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+bool String::MayContainNonAscii() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
+ return false;
+ }
+ return !str->HasOnlyAsciiChars();
}
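[editor's note] The serializer above recurses only into the left child of a cons string and loops on the right child, so right-leaning chains (the common shape when strings are built by repeated appends) consume no stack; when the budget runs out it returns a negative count, and WriteUtf8 flattens the string and retries. That discipline reduced to a sketch with illustrative types (the real code additionally recurses only when the requested range straddles the cons boundary):

struct Node { bool is_leaf; int size; Node* left; Node* right; };

// Returns serialized size, or -1 when the recursion budget is exhausted,
// so the caller can flatten and retry once.
int Serialize(Node* node, int budget) {
  int total = 0;
  while (!node->is_leaf) {
    if (budget == 0) return -1;
    int left_bytes = Serialize(node->left, budget - 1);  // budget spent here
    if (left_bytes < 0) return left_bytes;
    total += left_bytes;
    node = node->right;  // right spine handled iteratively, no stack growth
  }
  return total + node->size;
}
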
@@ -3703,11 +3803,12 @@ int String::WriteUtf8(char* buffer,
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
if (capacity == -1) {
capacity = str->length() + 1;
- len = str->length();
+ len = string_length;
} else {
len = i::Min(capacity, str->length());
}
@@ -3720,6 +3821,42 @@ int String::WriteUtf8(char* buffer,
return len;
}
+ if (capacity == -1 || capacity / 3 >= string_length) {
+ int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
+ const int kMaxRecursion = 100;
+ int utf8_bytes =
+ RecursivelySerializeToUtf8(*str,
+ buffer,
+ 0,
+ string_length,
+ kMaxRecursion,
+ previous,
+ &previous);
+ if (utf8_bytes >= 0) {
+ // Success serializing with recursion.
+ if ((options & NO_NULL_TERMINATION) == 0 &&
+ (capacity > utf8_bytes || capacity == -1)) {
+ buffer[utf8_bytes++] = '\0';
+ }
+ if (nchars_ref != NULL) *nchars_ref = string_length;
+ return utf8_bytes;
+ }
+ FlattenString(str);
+ // Recurse once. This time around the string is flat and the serializing
+ // with recursion will certainly succeed.
+ return WriteUtf8(buffer, capacity, nchars_ref, options);
+ } else if (capacity >= string_length) {
+ // First check that the buffer is large enough. If it is, then recurse
+ // once without a capacity limit, which will get into the other branch of
+ // this 'if'.
+ int utf8_bytes = i::Utf8Length(str);
+ if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
+ if (utf8_bytes <= capacity) {
+ return WriteUtf8(buffer, -1, nchars_ref, options);
+ }
+ }
+
+ // Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
@@ -3736,11 +3873,13 @@ int String::WriteUtf8(char* buffer,
int i;
int pos = 0;
int nchars = 0;
+ int previous = unibrow::Utf16::kNoPreviousCharacter;
for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
i::uc32 c = write_input_buffer.GetNext();
- int written = unibrow::Utf8::Encode(buffer + pos, c);
+ int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
pos += written;
nchars++;
+ previous = c;
}
if (i < len) {
// For the last characters we need to check the length for each one
@@ -3749,16 +3888,33 @@ int String::WriteUtf8(char* buffer,
char intermediate[unibrow::Utf8::kMaxEncodedSize];
for (; i < len && pos < capacity; i++) {
i::uc32 c = write_input_buffer.GetNext();
- int written = unibrow::Utf8::Encode(intermediate, c);
- if (pos + written <= capacity) {
- for (int j = 0; j < written; j++)
- buffer[pos + j] = intermediate[j];
+ if (unibrow::Utf16::IsTrailSurrogate(c) &&
+ unibrow::Utf16::IsLeadSurrogate(previous)) {
+ // We can't use the intermediate buffer here because the encoding
+ // of surrogate pairs is done under assumption that you can step
+ // back and fix the UTF8 stream. Luckily we only need space for one
+ // more byte, so there is always space.
+ ASSERT(pos < capacity);
+ int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
+ ASSERT(written == 1);
pos += written;
nchars++;
} else {
- // We've reached the end of the buffer
- break;
+ int written =
+ unibrow::Utf8::Encode(intermediate,
+ c,
+ unibrow::Utf16::kNoPreviousCharacter);
+ if (pos + written <= capacity) {
+ for (int j = 0; j < written; j++)
+ buffer[pos + j] = intermediate[j];
+ pos += written;
+ nchars++;
+ } else {
+ // We've reached the end of the buffer
+ break;
+ }
}
+ previous = c;
}
}
if (nchars_ref != NULL) *nchars_ref = nchars;
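[editor's note] The previous character threaded through unibrow::Utf8::Encode in this hunk lets a trail surrogate combine with the lead surrogate already written: the pair becomes one 4-byte UTF-8 sequence instead of two 3-byte surrogate encodings, and since the lead alone took 3 bytes, patching in the pair needs exactly one extra byte, which is why the code can skip the intermediate buffer in that case. The underlying arithmetic as a standalone sketch (names are illustrative, not V8's):

#include <stdint.h>

bool IsLeadSurrogate(uint16_t c)  { return (c & 0xFC00) == 0xD800; }
bool IsTrailSurrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }

// A lead/trail pair denotes a supplementary-plane code point, which always
// takes four bytes in UTF-8.
uint32_t CombineSurrogates(uint16_t lead, uint16_t trail) {
  return 0x10000u + (uint32_t(lead - 0xD800) << 10) + (trail - 0xDC00);
}
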
@@ -4014,7 +4170,7 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
bool v8::V8::Initialize() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ i::Isolate* isolate = i::Isolate::Current();
if (isolate != NULL && isolate->IsInitialized()) {
return true;
}
@@ -4907,7 +5063,7 @@ Local<Number> v8::Number::New(double value) {
Local<Integer> v8::Integer::New(int32_t value) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
if (i::Smi::IsValid(value)) {
return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
@@ -5185,7 +5341,7 @@ bool V8::IsExecutionTerminating(Isolate* isolate) {
Isolate* Isolate::GetCurrent() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ i::Isolate* isolate = i::Isolate::Current();
return reinterpret_cast<Isolate*>(isolate);
}
@@ -5240,7 +5396,8 @@ String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
- length_ = str->Utf8Length();
+ i::Handle<i::String> i_str = Utils::OpenHandle(*str);
+ length_ = i::Utf8Length(i_str);
str_ = i::NewArray<char>(length_ + 1);
str->WriteUtf8(str_);
}
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index dd8ffcd77c..d5db686c0e 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -80,7 +80,7 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return kPointerSize;
}
@@ -364,8 +364,14 @@ Address Assembler::target_address_at(Address pc) {
}
-void Assembler::set_target_at(Address constant_pool_entry,
- Address target) {
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_external_target_at(Address constant_pool_entry,
+ Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index ff15221119..ec28da4002 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -139,7 +139,6 @@ bool RelocInfo::IsCodedSpecially() {
}
-
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -238,25 +237,27 @@ MemOperand::MemOperand(Register rn, Register rm,
// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
- al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+ al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
+ kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | sp.code() * B16;
+ al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | sp.code() * B16;
+ al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
-const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
+const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
+const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
@@ -273,13 +274,13 @@ const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | fp.code() * B16;
+ al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | fp.code() * B16;
+ al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | fp.code() * B16;
+ al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | fp.code() * B16;
+ al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 11e39df682..e2d5f598b7 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -124,24 +124,47 @@ struct Register {
int code_;
};
-const Register no_reg = { -1 };
-
-const Register r0 = { 0 };
-const Register r1 = { 1 };
-const Register r2 = { 2 };
-const Register r3 = { 3 };
-const Register r4 = { 4 };
-const Register r5 = { 5 };
-const Register r6 = { 6 };
-const Register r7 = { 7 };
-const Register r8 = { 8 }; // Used as context register.
-const Register r9 = { 9 }; // Used as lithium codegen scratch register.
-const Register r10 = { 10 }; // Used as roots register.
-const Register fp = { 11 };
-const Register ip = { 12 };
-const Register sp = { 13 };
-const Register lr = { 14 };
-const Register pc = { 15 };
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
+
+const Register no_reg = { kRegister_no_reg_Code };
+
+const Register r0 = { kRegister_r0_Code };
+const Register r1 = { kRegister_r1_Code };
+const Register r2 = { kRegister_r2_Code };
+const Register r3 = { kRegister_r3_Code };
+const Register r4 = { kRegister_r4_Code };
+const Register r5 = { kRegister_r5_Code };
+const Register r6 = { kRegister_r6_Code };
+const Register r7 = { kRegister_r7_Code };
+// Used as context register.
+const Register r8 = { kRegister_r8_Code };
+// Used as lithium codegen scratch register.
+const Register r9 = { kRegister_r9_Code };
+// Used as roots register.
+const Register r10 = { kRegister_r10_Code };
+const Register fp = { kRegister_fp_Code };
+const Register ip = { kRegister_ip_Code };
+const Register sp = { kRegister_sp_Code };
+const Register lr = { kRegister_lr_Code };
+const Register pc = { kRegister_pc_Code };
+
// Single word VFP register.
struct SwVfpRegister {
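[editor's note] The plain integer kRegister_*_Code constants exist because, in C++03, reading a member of another namespace-scope object (sp.code()) is not a constant expression: every Instr constant built that way needed a dynamic, startup-time initializer, which is exactly what this release removes (ChangeLog: "Remove static initializers in v8", issue 1859). A condensed sketch of the difference, with a made-up mask:

struct Reg { int code_; int code() const { return code_; } };
const Reg sp = { 13 };
const int kRegister_sp_Code = 13;

const int B16 = 1 << 16;
const int kDynamicInit = sp.code() * B16;           // initialized at startup
const int kCompileTime = kRegister_sp_Code * B16;   // folded by the compiler
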
@@ -581,6 +604,7 @@ extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
+extern const Instr kBlxIp;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
@@ -662,20 +686,18 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address constant_pool_entry, Address target);
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address constant_pool_entry,
- Address target) {
- set_target_at(constant_pool_entry, target);
- }
+ Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
+ static const int kSpecialTargetSize = kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 250f020a62..f772db9be2 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -480,7 +480,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
+ const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
@@ -4237,7 +4237,7 @@ Register InstanceofStub::right() { return r1; }
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
- static const int kDisplacement =
+ const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
// Check that the key is a smi.
@@ -4622,10 +4622,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// sp[8]: subject string
// sp[12]: JSRegExp object
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime, invoke_regexp;
@@ -4824,8 +4824,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- static const int kParameterRegisters = 4;
+ const int kRegExpExecuteArguments = 8;
+ const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -5714,7 +5714,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// scratch: -
// Perform a number of probes in the symbol table.
- static const int kProbes = 4;
+ const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
@@ -5839,9 +5839,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// 0 <= from <= to <= string.length.
// If any of these assumptions fail, we call the runtime system.
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
__ Ldrd(r2, r3, MemOperand(sp, kToOffset));
STATIC_ASSERT(kFromOffset == kToOffset + 4);
@@ -7085,43 +7085,45 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+#define REG(Name) { kRegister_ ## Name ## _Code }
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { r6, r4, r7, EMIT_REMEMBERED_SET },
- { r6, r2, r7, EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
- { r3, r4, r5, EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
- { r4, r1, r2, OMIT_REMEMBERED_SET },
+ { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { r1, r2, r3, EMIT_REMEMBERED_SET },
- { r3, r2, r1, EMIT_REMEMBERED_SET },
+ { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { r2, r1, r3, EMIT_REMEMBERED_SET },
- { r3, r1, r2, EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { r3, r2, r4, EMIT_REMEMBERED_SET },
- { r2, r3, r4, EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { r2, r3, r9, EMIT_REMEMBERED_SET },
- { r2, r3, r9, OMIT_REMEMBERED_SET },
+ { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
- { r6, r2, r0, EMIT_REMEMBERED_SET },
- { r2, r6, r9, EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
- { r5, r0, r6, EMIT_REMEMBERED_SET },
+ { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7148,7 +7150,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 6e18277783..befd8f2de7 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -37,8 +37,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-TranscendentalFunction CreateTranscendentalFunction(
- TranscendentalCache::Type type) {
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
@@ -50,6 +49,10 @@ TranscendentalFunction CreateTranscendentalFunction(
}
+UnaryMathFunction CreateSqrtFunction() {
+ return &sqrt;
+}
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
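[editor's note] On ARM the renamed factory just returns the libm routine; the point of routing sqrt through a CreateSqrtFunction() hook is that, per the ChangeLog's "Ensure consistency of Math.sqrt on Intel platforms", other backends can hand back a generated stub instead. The indirection in isolation, as a sketch rather than the V8 code:

#include <math.h>

typedef double (*UnaryMathFunction)(double);

// ARM-style implementation: defer to the C library.
UnaryMathFunction CreateSqrtFunction() { return &sqrt; }

double RunSqrt(double x) {
  UnaryMathFunction f = CreateSqrtFunction();  // platform chooses the routine
  return f(x);
}
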
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index d9a4d4b0f4..7b2a3c4fc1 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -108,6 +108,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
+static const int32_t kBranchBeforeStackCheck = 0x2a000001;
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
@@ -118,10 +122,16 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
- (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
@@ -155,13 +165,21 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+4, cs);
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ patcher.masm()->b(+4, cs);
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 77f4e4414d..0cbd46ed1d 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -109,7 +110,9 @@ class JumpPatchSite BASE_EMBEDDED {
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
return 24;
}
@@ -132,32 +135,11 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- // We can optionally optimize based on counters rather than statistical
- // sampling.
- if (info->ShouldSelfOptimize()) {
- if (FLAG_trace_opt_verbose) {
- PrintF("[adding self-optimization header to %s]\n",
- *info->function()->debug_name()->ToCString());
- }
- has_self_optimization_header_ = true;
- MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
- Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
- JSGlobalPropertyCell* cell;
- if (maybe_cell->To(&cell)) {
- __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- Handle<Code> compile_stub(
- isolate()->builtins()->builtin(Builtins::kLazyRecompile));
- __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq);
- ASSERT(masm_->pc_offset() == self_optimization_header_size());
- }
- }
-
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -336,20 +318,68 @@ void FullCodeGenerator::ClearAccumulator() {
}
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(r2, Operand(profiling_counter_));
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+ // Self-optimization is a one-off thing: if it fails, don't try again.
+ reset_value = Smi::kMaxValue;
+ }
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
+ __ mov(r2, Operand(profiling_counter_));
+ __ mov(r3, Operand(Smi::FromInt(reset_value)));
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
+
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
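[editor's note] The weighted back edge makes the interrupt budget drain in proportion to the size of the loop body, so tight hot loops still reach the profiler quickly while large bodies do not over-trigger it. The computation lifted out as a sketch, using the constants defined in this hunk:

#include <algorithm>

static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 142;

// Weight charged against the interrupt budget at a loop back edge, from
// the number of code bytes generated since the loop target was bound.
int BackEdgeWeight(int distance) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance / kBackEdgeDistanceDivisor));
}
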
@@ -371,6 +401,32 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else if (FLAG_weighted_back_edges) {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ __ push(r0);
+ if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(r2);
+ __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+ } else {
+ InterruptStub stub;
+ __ CallStub(&stub);
+ }
+ __ pop(r0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+ }
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -888,7 +944,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
@@ -1186,7 +1242,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, mode);
+ CallIC(ic, mode);
}
@@ -1270,7 +1326,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
break;
}
@@ -1410,6 +1466,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ push(r1);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1445,6 +1511,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1470,7 +1537,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1493,27 +1560,29 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
case ObjectLiteral::Property::SETTER:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- } else {
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- VisitForStackValue(value);
- }
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
+ __ push(r0);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ ldr(r0, MemOperand(sp));
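[editor's note] The AccessorTable pass above replaces the old per-component runtime call: getters and setters are first grouped by property key, and each key is then defined with a single Runtime::kDefineOrRedefineAccessorProperty call carrying both components (EmitAccessor pushes null for a missing half). The grouping idea as a plain sketch with illustrative types:

#include <map>
#include <string>

struct Expr;  // stand-in for the AST expression type
struct AccessorPair { Expr* getter; Expr* setter; };

// One runtime call per key instead of one per getter/setter component.
void DefineAll(const std::map<std::string, AccessorPair>& table) {
  for (std::map<std::string, AccessorPair>::const_iterator it = table.begin();
       it != table.end(); ++it) {
    // push receiver, it->first, it->second.getter (or null),
    // it->second.setter (or null), attributes; then one runtime call.
  }
}
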
@@ -1736,7 +1805,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1744,7 +1813,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1771,7 +1840,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1854,7 +1923,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -1895,7 +1964,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1908,7 +1977,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
}
@@ -1925,7 +1994,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -2043,7 +2112,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2089,7 +2158,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2123,6 +2192,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
}
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+ ic_total_count_++;
+ __ Call(code, rmode, ast_id);
+}
+
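The new CallIC helper funnels every inline-cache and patchable stub call through one place so the full code generator can count IC call sites in ic_total_count_. One plausible consumer of such a count, sketched here purely as an assumption (the real decision logic lives in the runtime profiler; compare the --type_info_threshold flag further down), is a ratio test for whether enough type feedback has accumulated:

// Hypothetical helper; the names are illustrative, not V8's.
static bool EnoughTypeInfoToOptimize(int ics_with_type_info,
                                     int ic_total_count,
                                     int threshold_percent) {
  if (ic_total_count == 0) return true;  // no IC sites, nothing to wait for
  // Optimize once at least threshold_percent of IC sites carry type info.
  return 100 * ics_with_type_info >= threshold_percent * ic_total_count;
}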
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@@ -2140,7 +2217,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2173,7 +2250,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3770,7 +3847,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3925,7 +4002,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -4036,7 +4113,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4068,7 +4145,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4085,7 +4162,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4111,7 +4188,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic);
+ CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4294,7 +4371,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
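The object-literal change above replaces one runtime call per accessor with one call per property key: getters and setters are first collected into an AccessorTable, and a single Runtime::kDefineOrRedefineAccessorProperty call is then emitted per entry, with the null value standing in for a missing half (see EmitAccessor). A minimal sketch of the pairing step, using std::map in place of V8's zone-allocated table (types here are illustrative):

#include <cstddef>
#include <map>
#include <string>

struct Expression;  // stand-in for the AST expression node

// Mirrors ObjectLiteral::Accessors (see ast.h below): one slot per half.
struct AccessorPair {
  AccessorPair() : getter(NULL), setter(NULL) {}
  Expression* getter;
  Expression* setter;
};

typedef std::map<std::string, AccessorPair> AccessorTableSketch;

// Record one accessor; a getter and a setter for the same key share an entry.
static void RecordAccessor(AccessorTableSketch* table, const std::string& key,
                           bool is_getter, Expression* value) {
  AccessorPair* pair = &(*table)[key];  // default-constructed on first use
  if (is_getter) {
    pair->getter = value;
  } else {
    pair->setter = value;
  }
}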
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 36421d9bd3..cdc1947d4e 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -1098,6 +1098,14 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LOperand* receiver = UseFixed(instr->receiver(), r0);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index ae19677fda..62cde6e249 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -178,7 +178,8 @@ class LCodeGen;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
- V(DateField)
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -468,6 +469,20 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 012ea458ff..82b80a2b80 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -648,7 +648,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -2800,15 +2799,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
- ASSERT(receiver.is(r0)); // Used for parameter count.
- ASSERT(function.is(r1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(r0));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2849,6 +2843,18 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ ldr(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(r0)); // Used for parameter count.
+ ASSERT(function.is(r1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(r0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -4601,34 +4607,51 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
- // Copy elements backing store header.
- ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
+ // Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ ldr(r2, FieldMemOperand(source, i));
__ str(r2, FieldMemOperand(result, elements_offset + i));
}
- }
- // Copy elements backing store content.
- ASSERT(!has_elements || elements->IsFixedArray());
- int elements_length = has_elements ? elements->length() : 0;
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
+ // Copy elements backing store content.
+ int elements_length = has_elements ? elements->length() : 0;
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ // We only support little-endian mode...
+ int32_t value_low = value & 0xFFFFFFFF;
+ int32_t value_high = value >> 32;
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(r2, Operand(value_low));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ mov(r2, Operand(value_high));
+ __ str(r2, FieldMemOperand(result, total_offset + 4));
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ add(r2, result, Operand(*offset));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ } else {
+ __ mov(r2, Operand(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ }
+ }
} else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
+ UNREACHABLE();
}
}
}
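The new FAST_DOUBLE_ELEMENTS branch in EmitDeepCopy stores each unboxed double as two 32-bit immediates taken from the array's raw 64-bit representation; as the comment in the hunk notes, the offsets assume a little-endian target. A host-side model of the word split, under that same little-endian assumption:

#include <stdint.h>
#include <string.h>

// Split a double into the two 32-bit words the generated code stores.
static void SplitDouble(double d, int32_t* low, int32_t* high) {
  int64_t bits;
  memcpy(&bits, &d, sizeof(bits));       // bit-exact representation
  *low = (int32_t)(bits & 0xFFFFFFFF);   // stored at total_offset
  *high = (int32_t)(bits >> 32);         // stored at total_offset + 4
}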
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 45dd80ffb7..857c2bf770 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -3647,8 +3647,8 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
bind(&in_bounds);
Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
- vcvt_u32_f64(s0, temp_double_reg);
- vmov(result_reg, s0);
+ vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
+ vmov(result_reg, temp_double_reg.low());
bind(&done);
}
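The ClampDoubleToUint8 fix reuses the low half of the scratch double register instead of clobbering s0; the rounding trick itself is unchanged. Since vcvt_u32_f64 truncates toward zero, adding 0.5 first yields round-half-up. A scalar model for an input already confirmed in bounds by the preceding checks (not shown in this hunk):

#include <stdint.h>

// Model of the in-bounds path of ClampDoubleToUint8 (0 <= d <= 255).
static uint8_t ClampedRound(double d) {
  // Truncation toward zero after adding 0.5 == round half up.
  return (uint8_t)(d + 0.5);
}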
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index de83c13e15..10ff2dd96c 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -472,7 +472,7 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 74fca2e4c5..06f8385af9 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1387,14 +1387,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(r0, miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(r0, miss);
CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
}
@@ -2813,14 +2807,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(r0, &miss);
- }
-
// Check that the map of the global has not changed.
+ __ JumpIfSmi(r0, &miss);
CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 07509b5b28..4944202f07 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -45,6 +45,7 @@
#include "ic.h"
#include "isolate.h"
#include "jsregexp.h"
+#include "lazy-instance.h"
#include "platform.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
@@ -84,15 +85,36 @@
namespace v8 {
namespace internal {
+// -----------------------------------------------------------------------------
+// Common double constants.
+
+struct DoubleConstant BASE_EMBEDDED {
+ double min_int;
+ double one_half;
+ double minus_zero;
+ double zero;
+ double uint8_max_value;
+ double negative_infinity;
+ double canonical_non_hole_nan;
+ double the_hole_nan;
+};
+
+struct InitializeDoubleConstants {
+ static void Construct(DoubleConstant* double_constants) {
+ double_constants->min_int = kMinInt;
+ double_constants->one_half = 0.5;
+ double_constants->minus_zero = -0.0;
+ double_constants->uint8_max_value = 255;
+ double_constants->zero = 0.0;
+ double_constants->canonical_non_hole_nan = OS::nan_value();
+ double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
+ double_constants->negative_infinity = -V8_INFINITY;
+ }
+};
+
+static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
+ double_constants = LAZY_INSTANCE_INITIALIZER;
-const double DoubleConstant::min_int = kMinInt;
-const double DoubleConstant::one_half = 0.5;
-const double DoubleConstant::minus_zero = -0.0;
-const double DoubleConstant::uint8_max_value = 255;
-const double DoubleConstant::zero = 0.0;
-const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
-const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
-const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
@@ -937,49 +959,49 @@ ExternalReference ExternalReference::scheduled_exception_address(
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::min_int)));
+ &double_constants.Pointer()->min_int));
}
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::one_half)));
+ &double_constants.Pointer()->one_half));
}
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::minus_zero)));
+ &double_constants.Pointer()->minus_zero));
}
ExternalReference ExternalReference::address_of_zero() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::zero)));
+ &double_constants.Pointer()->zero));
}
ExternalReference ExternalReference::address_of_uint8_max_value() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::uint8_max_value)));
+ &double_constants.Pointer()->uint8_max_value));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::negative_infinity)));
+ &double_constants.Pointer()->negative_infinity));
}
ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
+ &double_constants.Pointer()->canonical_non_hole_nan));
}
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::the_hole_nan)));
+ &double_constants.Pointer()->the_hole_nan));
}
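Moving the double constants from static const members into a LazyInstance removes their dynamic initializers from startup: LAZY_INSTANCE_INITIALIZER is a constant POD initializer, and construction happens on the first Pointer() call. A compressed sketch of the idea, with C++11 std::call_once standing in for V8's CallOnce (the real lazy-instance.h avoids any non-trivial static initialization, which this sketch does not model exactly):

#include <mutex>

template <typename T, typename Init>
class LazyInstanceSketch {
 public:
  T* Pointer() {
    // Construct exactly once, on first use, thread-safely.
    std::call_once(once_, InitOnce, this);
    return &instance_;
  }
 private:
  static void InitOnce(LazyInstanceSketch* self) {
    Init::Construct(&self->instance_);
  }
  std::once_flag once_;
  T instance_;
};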
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 5063879ae4..918a2a679b 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -67,21 +67,6 @@ class AssemblerBase: public Malloced {
int jit_cookie_;
};
-// -----------------------------------------------------------------------------
-// Common double constants.
-
-class DoubleConstant: public AllStatic {
- public:
- static const double min_int;
- static const double one_half;
- static const double minus_zero;
- static const double zero;
- static const double uint8_max_value;
- static const double negative_infinity;
- static const double canonical_non_hole_nan;
- static const double the_hole_nan;
-};
-
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 239e5d0ffe..4b6ae680a4 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -399,6 +399,9 @@ bool FunctionDeclaration::IsInlineable() const {
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// Record type feedback from the oracle in the AST.
+ is_uninitialized_ = oracle->LoadIsUninitialized(this);
+ if (is_uninitialized_) return;
+
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
@@ -602,6 +605,13 @@ void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
+void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
+ ? oracle->GetObjectLiteralStoreMap(this)
+ : Handle<Map>::null();
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@@ -1054,8 +1064,6 @@ void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
increase_node_count();
- add_flag(kDontOptimize);
- add_flag(kDontInline);
add_flag(kDontSelfOptimize);
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 09864885e9..b827302ebd 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -1320,6 +1320,11 @@ class ObjectLiteral: public MaterializedLiteral {
Expression* value() { return value_; }
Kind kind() { return kind_; }
+ // Type feedback information.
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ bool IsMonomorphic() { return !receiver_type_.is_null(); }
+ Handle<Map> GetReceiverType() { return receiver_type_; }
+
bool IsCompileTimeValue();
void set_emit_store(bool emit_store);
@@ -1336,6 +1341,7 @@ class ObjectLiteral: public MaterializedLiteral {
Expression* value_;
Kind kind_;
bool emit_store_;
+ Handle<Map> receiver_type_;
};
DECLARE_NODE_TYPE(ObjectLiteral)
@@ -1360,6 +1366,12 @@ class ObjectLiteral: public MaterializedLiteral {
kHasFunction = 1 << 1
};
+ struct Accessors: public ZoneObject {
+ Accessors() : getter(NULL), setter(NULL) { }
+ Expression* getter;
+ Expression* setter;
+ };
+
protected:
template<class> friend class AstNodeFactory;
@@ -1515,6 +1527,7 @@ class Property: public Expression {
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
bool IsArrayLength() { return is_array_length_; }
+ bool IsUninitialized() { return is_uninitialized_; }
protected:
template<class> friend class AstNodeFactory;
@@ -1528,6 +1541,7 @@ class Property: public Expression {
key_(key),
pos_(pos),
is_monomorphic_(false),
+ is_uninitialized_(false),
is_array_length_(false),
is_string_length_(false),
is_string_access_(false),
@@ -1540,6 +1554,7 @@ class Property: public Expression {
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
+ bool is_uninitialized_ : 1;
bool is_array_length_ : 1;
bool is_string_length_ : 1;
bool is_string_access_ : 1;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index ca202f2dc4..0f493e6e57 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -33,6 +33,7 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
+#include "heap-profiler.h"
#include "mark-compact.h"
#include "vm-state-inl.h"
@@ -380,6 +381,8 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
+ HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
+ elms->address() + size_delta));
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
@@ -508,8 +511,7 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
- AssertNoAllocation no_gc;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
@@ -645,8 +647,7 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
- AssertNoAllocation no_gc;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@@ -757,8 +758,7 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- AssertNoAllocation no_gc;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, k,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, result_len);
@@ -831,9 +831,8 @@ BUILTIN(ArraySplice) {
if (!maybe_array->To(&result_array)) return maybe_array;
{
- AssertNoAllocation no_gc;
// Fill newly created array.
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, actual_start,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, actual_delete_count);
}
@@ -883,12 +882,11 @@ BUILTIN(ArraySplice) {
FixedArray* new_elms = FixedArray::cast(obj);
{
- AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS,
actual_start + actual_delete_count,
new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy);
@@ -973,14 +971,13 @@ BUILTIN(ArrayConcat) {
if (result_len == 0) return result_array;
// Copy data.
- AssertNoAllocation no_gc;
int start_pos = 0;
FixedArray* result_elms(FixedArray::cast(result_array->elements()));
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
start_pos, len);
start_pos += len;
@@ -1570,30 +1567,30 @@ struct BuiltinDesc {
BuiltinExtraArguments extra_args;
};
+#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
+
class BuiltinFunctionTable {
public:
- BuiltinFunctionTable() {
- Builtins::InitBuiltinFunctionTable();
+ BuiltinDesc* functions() {
+ CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
+ return functions_;
}
- static const BuiltinDesc* functions() { return functions_; }
-
- private:
- static BuiltinDesc functions_[Builtins::builtin_count + 1];
+ OnceType once_;
+ BuiltinDesc functions_[Builtins::builtin_count + 1];
friend class Builtins;
};
-BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
-
-static const BuiltinFunctionTable builtin_function_table_init;
+static BuiltinFunctionTable builtin_function_table =
+ BUILTIN_FUNCTION_TABLE_INIT;
// Define array of pointers to generators and C builtin functions.
// We do this in a sort of roundabout way so that we can do the initialization
// within the lexical scope of Builtins:: and within a context where
// Code::Flags names a non-abstract type.
void Builtins::InitBuiltinFunctionTable() {
- BuiltinDesc* functions = BuiltinFunctionTable::functions_;
+ BuiltinDesc* functions = builtin_function_table.functions_;
functions[builtin_count].generator = NULL;
functions[builtin_count].c_code = NULL;
functions[builtin_count].s_name = NULL;
@@ -1637,7 +1634,7 @@ void Builtins::SetUp(bool create_heap_objects) {
// Create a scope for the handles in the builtins.
HandleScope scope(isolate);
- const BuiltinDesc* functions = BuiltinFunctionTable::functions();
+ const BuiltinDesc* functions = builtin_function_table.functions();
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
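The builtin table now follows the same pattern: BUILTIN_FUNCTION_TABLE_INIT expands to a constant aggregate initializer, so the global is zero-initialized at load time with no static constructor, and CallOnce fills it in on the first functions() call. This sidesteps the static-initialization-order problem of the old file-scope builtin_function_table_init object, whose constructor did the work. The shape of the trick, reduced to a sketch (names are illustrative):

// Aggregate-initializable table: no constructor runs at program startup.
struct TableSketch {
  int once_state;   // stands in for OnceType / V8_ONCE_INIT
  int entries[4];   // stands in for BuiltinDesc functions_[...]
};

// Constant initialization happens before any code runs, so this global is
// safe to reference from static initializers in other translation units.
static TableSketch table_sketch = { 0, {} };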
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 3f651205ff..0163580e90 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -71,13 +71,6 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
} else {
print_source = FLAG_print_source;
print_ast = FLAG_print_ast;
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if (print_source && !filter.is_empty()) {
- print_source = info->function()->name()->IsEqualTo(filter);
- }
- if (print_ast && !filter.is_empty()) {
- print_ast = info->function()->name()->IsEqualTo(filter);
- }
ftype = "user-defined";
}
@@ -124,11 +117,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- FunctionLiteral* function = info->function();
- bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
- if (print_code && match) {
+ if (print_code) {
// Print the source code if available.
+ FunctionLiteral* function = info->function();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 28a3006e1c..50d70f265d 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -87,10 +87,10 @@ namespace internal {
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
-typedef double (*TranscendentalFunction)(double x);
+typedef double (*UnaryMathFunction)(double x);
-TranscendentalFunction CreateTranscendentalFunction(
- TranscendentalCache::Type type);
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
+UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index d689e871b7..2272337739 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -243,12 +243,15 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
// Take --hydrogen-filter into account.
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
Handle<String> name = info->function()->debug_name();
- bool match = filter.is_empty() || name->IsEqualTo(filter);
- if (!match) {
- info->SetCode(code);
- return true;
+ if (*FLAG_hydrogen_filter != '\0') {
+ Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+ if ((filter[0] == '-'
+ && name->IsEqualTo(filter.SubVector(1, filter.length())))
+ || (filter[0] != '-' && !name->IsEqualTo(filter))) {
+ info->SetCode(code);
+ return true;
+ }
}
// Recompile the unoptimized version of the code if the current version
@@ -450,6 +453,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
+ script->set_compilation_state(
+ Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
isolate->debugger()->OnAfterCompile(
@@ -518,7 +524,9 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
- if (FLAG_use_strict) info.SetLanguageMode(STRICT_MODE);
+ if (FLAG_use_strict) {
+ info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
+ }
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
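--hydrogen-filter thereby gains an exclusion form: an empty filter optimizes everything, a filter beginning with '-' optimizes everything except the named function, and any other filter optimizes only the named function. The predicate, restated over plain std::string in place of CStrVector/IsEqualTo:

#include <string>

// True if `name` passes `filter` (the --hydrogen-filter value).
static bool PassesHydrogenFilter(const std::string& name,
                                 const std::string& filter) {
  if (filter.empty()) return true;                         // no filter
  if (filter[0] == '-') return name != filter.substr(1);   // exclusion
  return name == filter;                                   // inclusion
}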
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index ad35af6c22..45781cf0d4 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -1436,6 +1436,13 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (!options.last_run) {
context.Dispose();
+#if !defined(V8_SHARED)
+ if (i::FLAG_send_idle_notification) {
+ const int kLongIdlePauseInMs = 1000;
+ V8::ContextDisposedNotification();
+ V8::IdleNotification(kLongIdlePauseInMs);
+ }
+#endif // !V8_SHARED
}
#ifndef V8_SHARED
@@ -1490,6 +1497,7 @@ int Shell::Main(int argc, char* argv[]) {
int stress_runs = i::FLAG_stress_runs;
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Run %d/%d ============\n", i + 1, stress_runs);
+ options.last_run = (i == stress_runs - 1);
result = RunMain(argc, argv);
}
#endif
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index c30afa85db..511663d8ee 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -372,8 +372,11 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
// Calculate the message size in UTF-8 encoding.
int utf8_len = 0;
+ int previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < message.length(); i++) {
- utf8_len += unibrow::Utf8::Length(message[i]);
+ uint16_t character = message[i];
+ utf8_len += unibrow::Utf8::Length(character, previous);
+ previous = character;
}
// Send the header.
@@ -388,17 +391,33 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
// Send message body as UTF-8.
int buffer_position = 0; // Current buffer position.
+ previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < message.length(); i++) {
// Write next UTF-8 encoded character to buffer.
+ uint16_t character = message[i];
buffer_position +=
- unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
+ unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
ASSERT(buffer_position < kBufferSize);
// Send buffer if full or last character is encoded.
- if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
- conn->Send(buffer, buffer_position);
- buffer_position = 0;
+ if (kBufferSize - buffer_position <
+ unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ||
+ i == message.length() - 1) {
+ if (unibrow::Utf16::IsLeadSurrogate(character)) {
+ const int kEncodedSurrogateLength =
+ unibrow::Utf16::kUtf8BytesToCodeASurrogate;
+ ASSERT(buffer_position >= kEncodedSurrogateLength);
+ conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
+ for (int i = 0; i < kEncodedSurrogateLength; i++) {
+ buffer[i] = buffer[buffer_position + i];
+ }
+ buffer_position = kEncodedSurrogateLength;
+ } else {
+ conn->Send(buffer, buffer_position);
+ buffer_position = 0;
+ }
}
+ previous = character;
}
return true;
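Both passes over the message now thread the previous UTF-16 code unit through Length and Encode, so a surrogate pair is emitted as one 4-byte UTF-8 sequence instead of two 3-byte CESU-8 units, and the flush logic holds back an encoded lead surrogate so a pair is never split across two Send calls. A sketch of the surrogate-aware length accounting, assuming the standard UTF-16 surrogate ranges (the exact bookkeeping inside unibrow may differ):

#include <stdint.h>

static bool IsLeadSurrogate(uint16_t c)  { return (c & 0xFC00) == 0xD800; }
static bool IsTrailSurrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }

// UTF-8 length contribution of one UTF-16 code unit, given its predecessor.
static int Utf8LengthSketch(uint16_t c, uint16_t previous) {
  if (IsTrailSurrogate(c) && IsLeadSurrogate(previous)) {
    // The pair costs 4 bytes total; the lead half was already counted as 3.
    return 1;
  }
  if (c < 0x80) return 1;
  if (c < 0x800) return 2;
  return 3;  // BMP character, or a (so far unpaired) surrogate half
}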
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 2058d48b71..01f6f398af 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1223,6 +1223,18 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
}
+void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
+ Handle<FixedArray> new_bindings(function->function_bindings());
+ Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
+
+ if (!bindee.is_null() && bindee->IsJSFunction() &&
+ !JSFunction::cast(*bindee)->IsBuiltin()) {
+ Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
+ Debug::FloodWithOneShot(shared_info);
+ }
+}
+
+
void Debug::FloodHandlerWithOneShot() {
// Iterate through the JavaScript stack looking for handlers.
StackFrame::Id id = break_frame_id();
@@ -1442,8 +1454,10 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
expressions_count - 2 - call_function_arg_count);
if (fun->IsJSFunction()) {
Handle<JSFunction> js_function(JSFunction::cast(fun));
- // Don't step into builtins.
- if (!js_function->IsBuiltin()) {
+ if (js_function->shared()->bound()) {
+ Debug::FloodBoundFunctionWithOneShot(js_function);
+ } else if (!js_function->IsBuiltin()) {
+ // Don't step into builtins.
// It will also compile target function if it's not compiled yet.
FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
}
@@ -1639,8 +1653,11 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// Flood the function with one-shot break points if it is called from where
// step into was requested.
if (fp == step_in_fp()) {
- // Don't allow step into functions in the native context.
- if (!function->IsBuiltin()) {
+ if (function->shared()->bound()) {
+ // Handle Function.prototype.bind
+ Debug::FloodBoundFunctionWithOneShot(function);
+ } else if (!function->IsBuiltin()) {
+ // Don't allow step into functions in the native context.
if (function->shared()->code() ==
Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index b9384e574d..474b90bd21 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -239,6 +239,7 @@ class Debug {
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+ void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index d069a4507e..2a30ddd3da 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -358,8 +358,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
output_count_(0),
jsframe_count_(0),
output_(NULL),
- frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
- has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@@ -847,7 +845,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::ARGUMENTS_OBJECT: {
// Use the arguments marker value as a sentinel and fill in the arguments
// object after the deoptimized frame is built.
- ASSERT(frame_index == 0); // Only supported for first frame.
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 7699222b76..6bc4a51036 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -220,11 +220,6 @@ class Deoptimizer : public Malloced {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
- static int frame_alignment_marker_offset() {
- return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
- static int has_alignment_padding_offset() {
- return OFFSET_OF(Deoptimizer, has_alignment_padding_);
- }
static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -337,10 +332,6 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
- // Frames can be dynamically padded on ia32 to align untagged doubles.
- Object* frame_alignment_marker_;
- intptr_t has_alignment_padding_;
-
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 331f6bc4b4..1d043a153e 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -131,95 +131,132 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
-void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
- FixedArray* from_obj,
+void CopyObjectToObjectElements(FixedArray* from,
ElementsKind from_kind,
uint32_t from_start,
- FixedArray* to_obj,
+ FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
- ASSERT(to_obj->map() != HEAP->fixed_cow_array_map());
+ int raw_copy_size) {
+ ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
- if (copy_size == -1) {
- copy_size = Min(from_obj->length() - from_start,
- to_obj->length() - to_start);
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+#ifdef DEBUG
+ // FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
+ }
+ }
+#endif
}
- ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_obj->length()));
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- Address to = to_obj->address() + FixedArray::kHeaderSize;
- Address from = from_obj->address() + FixedArray::kHeaderSize;
- CopyWords(reinterpret_cast<Object**>(to) + to_start,
- reinterpret_cast<Object**>(from) + from_start,
+ Address to_address = to->address() + FixedArray::kHeaderSize;
+ Address from_address = from->address() + FixedArray::kHeaderSize;
+ CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
+ reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Heap* heap = from_obj->GetHeap();
- WriteBarrierMode mode = to_obj->GetWriteBarrierMode(*no_gc);
- if (mode == UPDATE_WRITE_BARRIER) {
- heap->RecordWrites(to_obj->address(),
- to_obj->OffsetOfElementAt(to_start),
+ Heap* heap = from->GetHeap();
+ if (!heap->InNewSpace(to)) {
+ heap->RecordWrites(to->address(),
+ to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to_obj);
+ heap->incremental_marking()->RecordWrites(to);
}
}
-
-
static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ Heap* heap = from->GetHeap();
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->max_number_key() + 1 - from_start;
+#ifdef DEBUG
+ // FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
+ }
+ }
+#endif
+ }
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length());
ASSERT(to != from);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
- ASSERT(copy_size == -1 ||
- (copy_size + static_cast<int>(to_start)) <= to->length());
- WriteBarrierMode mode = to_kind == FAST_ELEMENTS
- ? UPDATE_WRITE_BARRIER
- : SKIP_WRITE_BARRIER;
- uint32_t copy_limit = (copy_size == -1)
- ? to->length()
- : Min(to_start + copy_size, static_cast<uint32_t>(to->length()));
- for (int i = 0; i < from->Capacity(); ++i) {
- Object* key = from->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- if (entry >= to_start && entry < copy_limit) {
- Object* value = from->ValueAt(i);
- ASSERT(to_kind == FAST_ELEMENTS || value->IsSmi());
- to->set(entry, value, mode);
- }
+ if (copy_size == 0) return;
+ for (int i = 0; i < copy_size; i++) {
+ int entry = from->FindEntry(i + from_start);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ Object* value = from->ValueAt(entry);
+ ASSERT(!value->IsTheHole());
+ to->set(i + to_start, value, SKIP_WRITE_BARRIER);
+ } else {
+ to->set_the_hole(i + to_start);
+ }
+ }
+ if (to_kind == FAST_ELEMENTS) {
+ if (!heap->InNewSpace(to)) {
+ heap->RecordWrites(to->address(),
+ to->OffsetOfElementAt(to_start),
+ copy_size);
}
+ heap->incremental_marking()->RecordWrites(to);
}
}
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedDoubleArray* from_obj,
+ FixedDoubleArray* from,
uint32_t from_start,
- FixedArray* to_obj,
+ FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int raw_copy_size) {
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
- if (copy_size == -1) {
- copy_size = Min(from_obj->length() - from_start,
- to_obj->length() - to_start);
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+#ifdef DEBUG
+ // FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
+ }
+ }
+#endif
}
- ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_obj->length()));
- if (copy_size == 0) return from_obj;
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
- MaybeObject* maybe_value = from_obj->get(i + from_start);
+ MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
ASSERT(to_kind == FAST_ELEMENTS);
// Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
@@ -229,42 +266,109 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
// can't be taken from new space.
if (!maybe_value->ToObject(&value)) {
ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
- Heap* heap = from_obj->GetHeap();
+ Heap* heap = from->GetHeap();
MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(from_obj->get_scalar(i + from_start),
+ heap->AllocateHeapNumber(from->get_scalar(i + from_start),
TENURED);
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
- to_obj->set(i + to_start, value, UPDATE_WRITE_BARRIER);
+ to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
}
}
- return to_obj;
+ return to;
}
-static void CopyDoubleToDoubleElements(FixedDoubleArray* from_obj,
+static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
uint32_t from_start,
- FixedDoubleArray* to_obj,
+ FixedDoubleArray* to,
uint32_t to_start,
- int copy_size) {
- if (copy_size == -1) {
- copy_size = Min(from_obj->length() - from_start,
- to_obj->length() - to_start);
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
}
- ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_obj->length()));
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- Address to = to_obj->address() + FixedDoubleArray::kHeaderSize;
- Address from = from_obj->address() + FixedDoubleArray::kHeaderSize;
- to += kDoubleSize * to_start;
- from += kDoubleSize * from_start;
+ Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
+ Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
+ to_address += kDoubleSize * to_start;
+ from_address += kDoubleSize * from_start;
int words_per_double = (kDoubleSize / kPointerSize);
- CopyWords(reinterpret_cast<Object**>(to),
- reinterpret_cast<Object**>(from),
+ CopyWords(reinterpret_cast<Object**>(to_address),
+ reinterpret_cast<Object**>(from_address),
words_per_double * copy_size);
}
+static void CopyObjectToDoubleElements(FixedArray* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
+ }
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return;
+ for (int i = 0; i < copy_size; i++) {
+ Object* hole_or_object = from->get(i + from_start);
+ if (hole_or_object->IsTheHole()) {
+ to->set_the_hole(i + to_start);
+ } else {
+ to->set(i + to_start, hole_or_object->Number());
+ }
+ }
+}
+
+
+static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->max_number_key() + 1 - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
+ }
+ ASSERT(copy_size + static_cast<int>(to_start) <= to->length());
+ if (copy_size == 0) return;
+ for (int i = 0; i < copy_size; i++) {
+ int entry = from->FindEntry(i + from_start);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ to->set(i + to_start, from->ValueAt(entry)->Number());
+ } else {
+ to->set_the_hole(i + to_start);
+ }
+ }
+}
+
+
// Base class for element handler implementations. Contains the
// the common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@@ -384,6 +488,9 @@ class ElementsAccessorBase : public ElementsAccessor {
if (from == NULL) {
from = from_holder->elements();
}
+ if (from->length() == 0) {
+ return from;
+ }
return ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, to_kind, to_start, copy_size);
}
@@ -626,12 +733,16 @@ class FastObjectElementsAccessor
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
CopyObjectToObjectElements(
- &no_gc, FixedArray::cast(from), ElementsTraits::Kind, from_start,
+ FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
}
+ case FAST_DOUBLE_ELEMENTS:
+ CopyObjectToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ return from;
default:
UNREACHABLE();
}
@@ -726,7 +837,8 @@ class FastDoubleElementsAccessor
JSObject* holder,
uint32_t key,
FixedDoubleArray* backing_store) {
- return !backing_store->is_the_hole(key);
+ return key < static_cast<uint32_t>(backing_store->length()) &&
+ !backing_store->is_the_hole(key);
}
};
@@ -997,6 +1109,11 @@ class DictionaryElementsAccessor
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
+ case FAST_DOUBLE_ELEMENTS:
+ CopyDictionaryToDoubleElements(
+ SeededNumberDictionary::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ return from;
default:
UNREACHABLE();
}
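Every copy helper above now decodes the negative copy_size sentinels itself: kCopyToEnd means copy to the end of the source (clamped to the destination), and kCopyToEndAndInitializeToHole additionally hole-fills the destination tail past the copied range; the dictionary variants compute the bound from max_number_key() + 1 instead of the source length. The shared decoding step, extracted as a sketch:

// Sentinels matching ElementsAccessor (declared in elements.h below).
static const int kCopyToEnd = -1;
static const int kCopyToEndAndInitializeToHole = -2;

// Resolve a raw copy size against the array bounds; reports via
// *fill_with_holes whether the destination tail must be hole-filled.
static int ResolveCopySize(int raw_copy_size,
                           int from_length, int from_start,
                           int to_length, int to_start,
                           bool* fill_with_holes) {
  *fill_with_holes = (raw_copy_size == kCopyToEndAndInitializeToHole);
  if (raw_copy_size >= 0) return raw_copy_size;
  int copy_size = from_length - from_start;
  if (to_length - to_start < copy_size) copy_size = to_length - to_start;
  return copy_size;
}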
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 5b5be23b10..ff97c08324 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -88,6 +88,15 @@ class ElementsAccessor {
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
+ // If kCopyToEnd is specified as the copy_size to CopyElements, it copies
+ // all of the elements from the source starting at source_start to the
+ // destination array.
+ static const int kCopyToEnd = -1;
+ // If kCopyToEndAndInitializeToHole is specified as the copy_size to
+ // CopyElements, it copies all of the elements from the source starting at
+ // source_start to the destination array, padding any remaining
+ // uninitialized elements in the destination array with the hole.
+ static const int kCopyToEndAndInitializeToHole = -2;
+
// Copy elements from one backing store to another. Typically, callers specify
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
@@ -104,7 +113,8 @@ class ElementsAccessor {
FixedArrayBase* to,
ElementsKind to_kind,
FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, to, to_kind, 0, -1, from);
+ return CopyElements(from_holder, 0, to, to_kind, 0,
+ kCopyToEndAndInitializeToHole, from);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
@@ -146,8 +156,7 @@ class ElementsAccessor {
};
-void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
- FixedArray* from_obj,
+void CopyObjectToObjectElements(FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 443d4b8d0f..ea8cc3725d 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -885,7 +885,8 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
}
if (stack_guard->IsGCRequest()) {
- isolate->heap()->CollectAllGarbage(false, "StackGuard GC request");
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ "StackGuard GC request");
stack_guard->Continue(GC_REQUEST);
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 15ded01e7b..143099cfb8 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -382,6 +382,8 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
+ script->set_compilation_state(
+ Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
script->set_wrapper(*wrapper);
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
@@ -552,7 +554,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
- function_info->allows_lazy_compilation()) {
+ function_info->allows_lazy_compilation() &&
+ !function_info->optimization_disabled()) {
result->MarkForLazyRecompilation();
}
return result;
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index ac30b2941d..0668addb48 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -81,17 +81,41 @@
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
+#define JSARGUMENTS_INIT {{}}
struct JSArguments {
public:
- JSArguments();
- JSArguments(int argc, const char** argv);
- int argc() const;
- const char** argv();
- const char*& operator[](int idx);
- JSArguments& operator=(JSArguments args);
+ inline int argc() const {
+ return static_cast<int>(storage_[0]);
+ }
+ inline const char** argv() const {
+ return reinterpret_cast<const char**>(storage_[1]);
+ }
+ inline const char*& operator[] (int idx) const {
+ return argv()[idx];
+ }
+ inline JSArguments& operator=(JSArguments args) {
+ set_argc(args.argc());
+ set_argv(args.argv());
+ return *this;
+ }
+ static JSArguments Create(int argc, const char** argv) {
+ JSArguments args;
+ args.set_argc(argc);
+ args.set_argv(argv);
+ return args;
+ }
private:
- int argc_;
- const char** argv_;
+ void set_argc(int argc) {
+ storage_[0] = argc;
+ }
+ void set_argv(const char** argv) {
+ storage_[1] = reinterpret_cast<AtomicWord>(argv);
+ }
+ public:
+ // Contains argc and argv. Unfortunately we have to store these two fields
+ // in a single array to avoid making the initialization macro (which would
+ // otherwise be "{ 0, NULL }") contain a comma.
+ AtomicWord storage_[2];
};
#endif
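JSArguments has to be a POD here because DEFINE_args statically initializes a flag of this type, and its initializer travels through nested flag macros: braces do not protect a comma in a macro argument (only parentheses do), so an initializer like { 0, NULL } would be split into two arguments during rescanning. Packing argc and argv into one two-element array keeps JSARGUMENTS_INIT ({{}}) comma-free at the top level. A self-contained model of the resulting type:

#include <stdint.h>

typedef intptr_t AtomicWordSketch;  // stand-in for V8's AtomicWord

// POD with a comma-free aggregate initializer, mirroring JSArguments.
struct ArgsSketch {
  int argc() const { return (int)storage_[0]; }
  const char** argv() const { return (const char**)storage_[1]; }
  AtomicWordSketch storage_[2];  // [0] holds argc, [1] holds argv
};

static ArgsSketch args_sketch = {{}};  // zero-initialized at load, no ctor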
@@ -135,7 +159,7 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
-DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
+DEFINE_string(hydrogen_filter, "", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
@@ -168,14 +192,15 @@ DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
-DEFINE_bool(inline_construct, false, "inline constructor calls")
+DEFINE_bool(inline_construct, true, "inline constructor calls")
+DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
// Experimental profiler changes.
-DEFINE_bool(experimental_profiler, false, "enable all profiler experiments")
+DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
DEFINE_bool(self_optimization, false,
@@ -191,7 +216,7 @@ DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
DEFINE_int(interrupt_budget, 5900,
"execution budget before interrupt is triggered")
-DEFINE_int(type_info_threshold, 40,
+DEFINE_int(type_info_threshold, 15,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
@@ -282,6 +307,7 @@ DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
+DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
// execution.cc
DEFINE_int(stack_size, kPointerSize * 128,
@@ -324,6 +350,9 @@ DEFINE_bool(trace_incremental_marking, false,
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
+
+DEFINE_bool(send_idle_notification, false,
+ "Send idle notifcation between stress runs.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
@@ -417,7 +446,7 @@ DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSArguments(),
+DEFINE_args(js_arguments, JSARGUMENTS_INIT,
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 75e66ce34d..5720cbda34 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -411,7 +411,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
for (int k = i; k < *argc; k++) {
js_argv[k - start_pos] = StrDup(argv[k]);
}
- *flag->args_variable() = JSArguments(js_argc, js_argv);
+ *flag->args_variable() = JSArguments::Create(js_argc, js_argv);
i = *argc; // Consume all arguments
break;
}
@@ -534,19 +534,6 @@ void FlagList::PrintHelp() {
}
}
-JSArguments::JSArguments()
- : argc_(0), argv_(NULL) {}
-JSArguments::JSArguments(int argc, const char** argv)
- : argc_(argc), argv_(argv) {}
-int JSArguments::argc() const { return argc_; }
-const char** JSArguments::argv() { return argv_; }
-const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
-JSArguments& JSArguments::operator=(JSArguments args) {
- argc_ = args.argc_;
- argv_ = args.argv_;
- return *this;
-}
-
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index be537c96e9..0571a813f5 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -31,6 +31,7 @@
#include "deoptimizer.h"
#include "frames-inl.h"
#include "full-codegen.h"
+#include "lazy-instance.h"
#include "mark-compact.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
@@ -1301,7 +1302,7 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Address inner_pointer) {
Heap* heap = isolate_->heap();
// Check if the inner pointer points into a large object chunk.
- LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
+ LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
if (large_page != NULL) {
return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
}
@@ -1380,12 +1381,12 @@ struct JSCallerSavedCodeData {
};
-static const JSCallerSavedCodeData kCallerSavedCodeData;
-
+static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
+ LAZY_INSTANCE_INITIALIZER;
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
- return kCallerSavedCodeData.reg_code[n];
+ return caller_saved_code_data.Get().reg_code[n];
}
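
The change above removes a static initializer: kCallerSavedCodeData was
constructed at library load time, while LazyInstance defers construction to
the first Get(). A rough single-threaded sketch of the idea (the real
lazy-instance.h added in this commit is more general and thread-aware):

    #include <new>

    template <typename T>
    struct LazyHolder {
      T& Get() {
        if (!constructed_) {
          new (storage_) T();  // Construct in place on first access.
          constructed_ = true;
        }
        return *reinterpret_cast<T*>(storage_);
      }
      alignas(T) unsigned char storage_[sizeof(T)];
      bool constructed_;
    };

    // A namespace-scope LazyHolder<T> is zero-initialized before main(),
    // so nothing runs until the first Get().
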
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index f77c82df51..d963979ad8 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -313,7 +313,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- code->set_optimizable(info->IsOptimizable());
+ code->set_optimizable(info->IsOptimizable() &&
+ !info->function()->flags()->Contains(kDontOptimize));
code->set_self_optimization_header(cgen.has_self_optimization_header_);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 25e3dba124..58d59862a5 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -470,6 +470,8 @@ class FullCodeGenerator: public AstVisitor {
Label* done);
void EmitVariableLoad(VariableProxy* proxy);
+ void EmitAccessor(Expression* expression);
+
// Expects the arguments and the function already pushed.
void EmitResolvePossiblyDirectEval(int arg_count);
@@ -804,6 +806,28 @@ class FullCodeGenerator: public AstVisitor {
};
+// A map from property names to getter/setter pairs allocated in the zone.
+class AccessorTable: public TemplateHashMap<Literal,
+ ObjectLiteral::Accessors,
+ ZoneListAllocationPolicy> {
+ public:
+ explicit AccessorTable(Zone* zone) :
+ TemplateHashMap<Literal,
+ ObjectLiteral::Accessors,
+ ZoneListAllocationPolicy>(Literal::Match),
+ zone_(zone) { }
+
+ Iterator lookup(Literal* literal) {
+ Iterator it = find(literal, true);
+ if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
+ return it;
+ }
+
+ private:
+ Zone* zone_;
+};
+
+
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
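
AccessorTable gives full-codegen one zone-allocated slot per property name, so
a literal with both a getter and a setter for the same key can emit a single
definition. A hedged sketch of how a visitor might drive it, assuming
ObjectLiteral::Accessors exposes getter and setter fields (the loop itself is
illustrative, not quoted from the compiler):

    // For each accessor property in an object literal:
    //   AccessorTable::Iterator it = accessor_table.lookup(key_literal);
    //   if (property is a getter) it->second->getter = value_expression;
    //   else                      it->second->setter = value_expression;
    // Afterwards, one pass over the table defines each name's pair at once.
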
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 4192222f90..d3cd44707c 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -33,6 +33,7 @@
#include "compiler.h"
#include "global-handles.h"
#include "messages.h"
+#include "platform.h"
#include "natives.h"
#include "scopeinfo.h"
@@ -2035,7 +2036,7 @@ static void AddUnwindInfo(CodeDescription* desc) {
}
-Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
+static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
void GDBJITInterface::AddCode(const char* name,
@@ -2045,7 +2046,7 @@ void GDBJITInterface::AddCode(const char* name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex.Pointer());
AssertNoAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2126,7 +2127,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@@ -2146,7 +2147,7 @@ void GDBJITInterface::RemoveCode(Code* code) {
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
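
The same lazy-initialization treatment is applied here: the class-static
Mutex* built by OS::CreateMutex() at load time becomes a file-scope LazyMutex,
and every caller now locks mutex.Pointer(), which creates the mutex on first
use. Conceptually (hedged sketch, not the real platform code):

    // Before: runs during static initialization.
    //   Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
    // After: inert POD until first use.
    //   static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
    //   ScopedLock lock(mutex.Pointer());  // Allocates the mutex lazily.
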
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index 2cf15bc61d..0eca9384d8 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -132,9 +132,6 @@ class GDBJITInterface: public AllStatic {
static void RemoveCode(Code* code);
static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
-
- private:
- static Mutex* mutex_;
};
#define GDBJIT(action) GDBJITInterface::action
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index e53cc81d6c..25d4ffe89b 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -267,8 +267,9 @@ const int kBinary32ExponentShift = 23;
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-// ASCII/UC16 constants
+// ASCII/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
+// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
const int kASCIISize = kCharSize;
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 1bb258e475..416ecbd211 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -800,4 +800,162 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
}
+// This method determines the type of string involved and then gets the UTF-8
+// length of the string. It doesn't flatten the string and has log(n) recursion
+// for a string of length n. If the failure flag gets set, then we have to
+// flatten the string and retry. Failures are caused by surrogate pairs in deep
+// cons strings.
+
+// Single surrogate characters that are encountered in the UTF-16 character
+// sequence of the input string get counted as 3 UTF-8 bytes, because that
+// is the way that WriteUtf8 will encode them. Surrogate pairs are counted and
+// encoded as one 4-byte UTF-8 sequence.
+
+// This function conceptually uses recursion on the two halves of cons strings.
+// However, in order to avoid the recursion going too deep it recurses on the
+// second string of the cons, but iterates on the first substring (by manually
+// eliminating it as a tail recursion). This means it counts the UTF-8 length
+// from the end to the start, which makes no difference to the total.
+
+// Surrogate pairs are recognized even if they are split across two sides of a
+// cons, which complicates the implementation somewhat. As a result, overly
+// deep recursion cannot always be avoided. When that happens, it is detected
+// and the failure flag is set, signaling to the caller that the string should
+// be flattened and the operation retried.
+int Utf8LengthHelper(String* input,
+ int from,
+ int to,
+ bool followed_by_surrogate,
+ int max_recursion,
+ bool* failure,
+ bool* starts_with_surrogate) {
+ if (from == to) return 0;
+ int total = 0;
+ bool dummy;
+ while (true) {
+ if (input->IsAsciiRepresentation()) {
+ *starts_with_surrogate = false;
+ return total + to - from;
+ }
+ switch (StringShape(input).representation_tag()) {
+ case kConsStringTag: {
+ ConsString* str = ConsString::cast(input);
+ String* first = str->first();
+ String* second = str->second();
+ int first_length = first->length();
+ if (first_length - from > to - first_length) {
+ if (first_length < to) {
+ // Right hand side is shorter. No need to check the recursion depth
+ // since this can only happen log(n) times.
+ bool right_starts_with_surrogate = false;
+ total += Utf8LengthHelper(second,
+ 0,
+ to - first_length,
+ followed_by_surrogate,
+ max_recursion - 1,
+ failure,
+ &right_starts_with_surrogate);
+ if (*failure) return 0;
+ followed_by_surrogate = right_starts_with_surrogate;
+ input = first;
+ to = first_length;
+ } else {
+ // We only need the left hand side.
+ input = first;
+ }
+ } else {
+ if (first_length > from) {
+ // Left hand side is shorter.
+ if (first->IsAsciiRepresentation()) {
+ total += first_length - from;
+ *starts_with_surrogate = false;
+ starts_with_surrogate = &dummy;
+ input = second;
+ from = 0;
+ to -= first_length;
+ } else if (second->IsAsciiRepresentation()) {
+ followed_by_surrogate = false;
+ total += to - first_length;
+ input = first;
+ to = first_length;
+ } else if (max_recursion > 0) {
+ bool right_starts_with_surrogate = false;
+ // Recursing on the long one. This may fail.
+ total += Utf8LengthHelper(second,
+ 0,
+ to - first_length,
+ followed_by_surrogate,
+ max_recursion - 1,
+ failure,
+ &right_starts_with_surrogate);
+ if (*failure) return 0;
+ input = first;
+ to = first_length;
+ followed_by_surrogate = right_starts_with_surrogate;
+ } else {
+ *failure = true;
+ return 0;
+ }
+ } else {
+ // We only need the right hand side.
+ input = second;
+ from = 0;
+ to -= first_length;
+ }
+ }
+ continue;
+ }
+ case kExternalStringTag:
+ case kSeqStringTag: {
+ Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
+ const uc16* p = vector.start();
+ int previous = unibrow::Utf16::kNoPreviousCharacter;
+ for (int i = from; i < to; i++) {
+ uc16 c = p[i];
+ total += unibrow::Utf8::Length(c, previous);
+ previous = c;
+ }
+ if (to - from > 0) {
+ if (unibrow::Utf16::IsLeadSurrogate(previous) &&
+ followed_by_surrogate) {
+ total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
+ }
+ if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
+ *starts_with_surrogate = true;
+ }
+ }
+ return total;
+ }
+ case kSlicedStringTag: {
+ SlicedString* str = SlicedString::cast(input);
+ int offset = str->offset();
+ input = str->parent();
+ from += offset;
+ to += offset;
+ continue;
+ }
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+ }
+ return 0;
+}
+
+
+int Utf8Length(Handle<String> str) {
+ bool dummy;
+ bool failure;
+ int len;
+ const int kRecursionBudget = 100;
+ do {
+ failure = false;
+ len = Utf8LengthHelper(
+ *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
+ if (failure) FlattenString(str);
+ } while (failure);
+ return len;
+}
+
} } // namespace v8::internal
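
To see why the failure path exists, consider a surrogate pair split across a
cons boundary: the lead surrogate ends the left child and the trail surrogate
starts the right child. Counted independently, each unpaired unit costs 3
UTF-8 bytes, so the helper subtracts kBytesSavedByCombiningSurrogates when a
segment ending in a lead surrogate is followed by one starting with a trail.
A worked example (my arithmetic, assuming WriteUtf8's encoding rules):

    // U+1F600 is stored as the UTF-16 pair 0xD83D 0xDE00.
    //   Encoded together: one 4-byte UTF-8 sequence (F0 9F 98 80).
    //   Each surrogate alone: one 3-byte sequence.
    // A split pair is first counted as 3 + 3 = 6 bytes, then corrected:
    //   6 - kBytesSavedByCombiningSurrogates == 6 - 2 == 4.
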
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 42089134e4..960696b5fb 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -174,6 +174,8 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
+int Utf8Length(Handle<String> str);
+
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index ede098cfd0..5aeb8951ed 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -36,15 +36,15 @@ namespace v8 {
namespace internal {
template<class AllocationPolicy>
-class TemplateHashMap {
+class TemplateHashMapImpl {
public:
typedef bool (*MatchFun) (void* key1, void* key2);
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
- TemplateHashMap(MatchFun match, uint32_t initial_capacity = 8);
+ TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
- ~TemplateHashMap();
+ ~TemplateHashMapImpl();
// HashMap entries are (key, value, hash) triplets.
// Some clients may not need to use the value slot
@@ -99,10 +99,10 @@ class TemplateHashMap {
void Resize();
};
-typedef TemplateHashMap<FreeStoreAllocationPolicy> HashMap;
+typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
template<class P>
-TemplateHashMap<P>::TemplateHashMap(MatchFun match,
+TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
uint32_t initial_capacity) {
match_ = match;
Initialize(initial_capacity);
@@ -110,13 +110,13 @@ TemplateHashMap<P>::TemplateHashMap(MatchFun match,
template<class P>
-TemplateHashMap<P>::~TemplateHashMap() {
+TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
P::Delete(map_);
}
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
void* key, uint32_t hash, bool insert) {
// Find a matching entry.
Entry* p = Probe(key, hash);
@@ -146,7 +146,7 @@ typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
template<class P>
-void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
+void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
@@ -206,7 +206,7 @@ void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
template<class P>
-void TemplateHashMap<P>::Clear() {
+void TemplateHashMapImpl<P>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
@@ -217,13 +217,14 @@ void TemplateHashMap<P>::Clear() {
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Start() const {
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
return Next(map_ - 1);
}
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
+ const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
@@ -236,7 +237,7 @@ typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
uint32_t hash) {
ASSERT(key != NULL);
@@ -258,7 +259,7 @@ typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
template<class P>
-void TemplateHashMap<P>::Initialize(uint32_t capacity) {
+void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
if (map_ == NULL) {
@@ -271,7 +272,7 @@ void TemplateHashMap<P>::Initialize(uint32_t capacity) {
template<class P>
-void TemplateHashMap<P>::Resize() {
+void TemplateHashMapImpl<P>::Resize() {
Entry* map = map_;
uint32_t n = occupancy_;
@@ -290,6 +291,50 @@ void TemplateHashMap<P>::Resize() {
P::Delete(map);
}
+
+// A hash map for pointer keys and values with an STL-like interface.
+template<class Key, class Value, class AllocationPolicy>
+class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
+ public:
+ STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
+ STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
+ struct value_type {
+ Key* first;
+ Value* second;
+ };
+
+ class Iterator {
+ public:
+ Iterator& operator++() {
+ entry_ = map_->Next(entry_);
+ return *this;
+ }
+
+ value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
+ bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
+
+ private:
+ Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
+ typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
+ map_(map), entry_(entry) { }
+
+ const TemplateHashMapImpl<AllocationPolicy>* map_;
+ typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
+
+ friend class TemplateHashMap;
+ };
+
+ TemplateHashMap(
+ typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
+ : TemplateHashMapImpl<AllocationPolicy>(match) { }
+
+ Iterator begin() const { return Iterator(this, this->Start()); }
+ Iterator end() const { return Iterator(this, NULL); }
+ Iterator find(Key* key, bool insert = false) {
+ return Iterator(this, this->Lookup(key, key->Hash(), insert));
+ }
+};
+
} } // namespace v8::internal
#endif // V8_HASHMAP_H_
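
The new TemplateHashMap wrapper reinterprets the untyped Entry key/value slots
as a typed value_type pair, which is what lets AccessorTable above iterate
with an STL-ish interface. A hedged usage sketch (Key must provide Hash(), as
find() shows; the map variable and types here are illustrative):

    // TemplateHashMap<Key, Value, Policy> map(MatchFunction);
    // for (TemplateHashMap<Key, Value, Policy>::Iterator it = map.begin();
    //      it != map.end(); ++it) {
    //   Key* k = it->first;
    //   Value* v = it->second;  // NULL until a caller assigns one.
    // }
    // find(key, true) inserts an empty slot for a missing key, which is
    // exactly what AccessorTable::lookup relies on.
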
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index da98239db1..a1cccf6f22 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -60,8 +60,7 @@
namespace v8 {
namespace internal {
-
-static Mutex* gc_initializer_mutex = OS::CreateMutex();
+static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
Heap::Heap()
@@ -82,7 +81,7 @@ Heap::Heap()
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(128l * LUMP_OF_MEMORY),
+ max_executable_size_(256l * LUMP_OF_MEMORY),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@@ -93,6 +92,7 @@ Heap::Heap()
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
+ global_ic_age_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -105,6 +105,7 @@ Heap::Heap()
gc_post_processing_depth_(0),
ms_count_(0),
gc_count_(0),
+ remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_allowed_(true),
@@ -2471,34 +2472,26 @@ bool Heap::CreateInitialObjects() {
set_the_hole_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-2),
+ Smi::FromInt(-4),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-3),
+ Smi::FromInt(-2),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-4),
+ Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
- { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
- Smi::FromInt(-5),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_frame_alignment_marker(Oddball::cast(obj));
- STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
-
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3401,6 +3394,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
+ code->set_ic_age(global_ic_age_);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -4186,8 +4180,6 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
PretenureFlag pretenure) {
- // V8 only supports characters in the Basic Multilingual Plane.
- const uc32 kMaxSupportedChar = 0xFFFF;
// Count the number of characters in the UTF-8 string and check if
// it is an ASCII string.
Access<UnicodeCache::Utf8Decoder>
@@ -4195,8 +4187,12 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
decoder->Reset(string.start(), string.length());
int chars = 0;
while (decoder->has_more()) {
- decoder->GetNext();
- chars++;
+ uint32_t r = decoder->GetNext();
+ if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ chars++;
+ } else {
+ chars += 2;
+ }
}
Object* result;
@@ -4207,10 +4203,15 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
// Convert and copy the characters into the new object.
String* string_result = String::cast(result);
decoder->Reset(string.start(), string.length());
- for (int i = 0; i < chars; i++) {
- uc32 r = decoder->GetNext();
- if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
- string_result->Set(i, r);
+ int i = 0;
+ while (i < chars) {
+ uint32_t r = decoder->GetNext();
+ if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
+ string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
+ } else {
+ string_result->Set(i++, r);
+ }
}
return result;
}
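
With the Basic-Multilingual-Plane restriction removed, the decoder loop above
now expands a supplementary code point into two UTF-16 code units. A worked
example of the split (standard UTF-16 arithmetic, not a V8 API):

    // r = 0x10400 (> kMaxNonSurrogateCharCode, i.e. > 0xFFFF)
    //   lead  = 0xD800 + ((r - 0x10000) >> 10)    == 0xD801
    //   trail = 0xDC00 + ((r - 0x10000) & 0x3FF)  == 0xDC00
    // so this character occupies two slots of the result string, matching
    // the chars += 2 accounting in the counting pass.
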
@@ -4267,7 +4268,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
uint32_t hash_field) {
ASSERT(chars >= 0);
// Ensure the chars matches the number of characters in the buffer.
- ASSERT(static_cast<unsigned>(chars) == buffer->Length());
+ ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
// Determine whether the string is ASCII.
bool is_ascii = true;
while (buffer->has_more()) {
@@ -4313,8 +4314,15 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- for (int i = 0; i < chars; i++) {
- answer->Set(i, buffer->GetNext());
+ int i = 0;
+ while (i < chars) {
+ uint32_t character = buffer->GetNext();
+ if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
+ answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
+ } else {
+ answer->Set(i++, character);
+ }
}
return answer;
}
@@ -4808,11 +4816,62 @@ void Heap::EnsureHeapIsIterable() {
}
+void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
+ // This flag prevents incremental marking from requesting GC via stack guard
+ idle_notification_will_schedule_next_gc_ = true;
+ incremental_marking()->Step(step_size);
+ idle_notification_will_schedule_next_gc_ = false;
+
+ if (incremental_marking()->IsComplete()) {
+ bool uncommit = false;
+ if (gc_count_at_last_idle_gc_ == gc_count_) {
+ // No GC since the last full GC, the mutator is probably not active.
+ isolate_->compilation_cache()->Clear();
+ uncommit = true;
+ }
+ CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ gc_count_at_last_idle_gc_ = gc_count_;
+ if (uncommit) {
+ new_space_.Shrink();
+ UncommitFromSpace();
+ }
+ }
+}
+
+
bool Heap::IdleNotification(int hint) {
- if (hint >= 1000) return IdleGlobalGC();
- if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
+ const int kMaxHint = 1000;
+ intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
+ // The size factor is in range [3..100].
+ intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+
+ if (contexts_disposed_ > 0) {
+ if (hint >= kMaxHint) {
+ // The embedder is requesting a lot of GC work after context disposal,
+ // we age inline caches so that they don't keep objects from
+ // the old context alive.
+ AgeInlineCaches();
+ }
+ int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
+ if (hint >= mark_sweep_time && !FLAG_expose_gc &&
+ incremental_marking()->IsStopped()) {
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: contexts disposed");
+ } else {
+ AdvanceIdleIncrementalMarking(step_size);
+ contexts_disposed_ = 0;
+ }
+ // Make sure that we have no pending context disposals.
+ // Take into account that we might have decided to delay full collection
+ // because incremental marking is in progress.
+ ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+ return false;
+ }
+
+ if (hint >= kMaxHint || !FLAG_incremental_marking ||
FLAG_expose_gc || Serializer::enabled()) {
- return true;
+ return IdleGlobalGC();
}
// By doing small chunks of GC work in each IdleNotification,
@@ -4824,9 +4883,6 @@ bool Heap::IdleNotification(int hint) {
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
- intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
- // The size factor is in range [3..100].
- intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
if (incremental_marking()->IsStopped()) {
if (!IsSweepingComplete() &&
@@ -4853,32 +4909,14 @@ bool Heap::IdleNotification(int hint) {
}
if (incremental_marking()->IsStopped()) {
- if (hint < 1000 && !WorthStartingGCWhenIdle()) {
+ if (!WorthStartingGCWhenIdle()) {
FinishIdleRound();
return true;
}
incremental_marking()->Start();
}
- // This flag prevents incremental marking from requesting GC via stack guard
- idle_notification_will_schedule_next_gc_ = true;
- incremental_marking()->Step(step_size);
- idle_notification_will_schedule_next_gc_ = false;
-
- if (incremental_marking()->IsComplete()) {
- bool uncommit = false;
- if (gc_count_at_last_idle_gc_ == gc_count_) {
- // No GC since the last full GC, the mutator is probably not active.
- isolate_->compilation_cache()->Clear();
- uncommit = true;
- }
- CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
- gc_count_at_last_idle_gc_ = gc_count_;
- if (uncommit) {
- new_space_.Shrink();
- UncommitFromSpace();
- }
- }
+ AdvanceIdleIncrementalMarking(step_size);
return false;
}
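
The reworked IdleNotification derives the incremental-marking step size
directly from the embedder's hint before branching. Plugging numbers into the
code above:

    // hint = 500:  size_factor = Min(Max(500, 30), 1000) / 10 = 50
    //              step_size   = 50 * IncrementalMarking::kAllocatedThreshold
    // hint = 10:   size_factor = Min(Max(10, 30), 1000) / 10  = 3
    // hint >= 1000 (kMaxHint): the incremental path is skipped and
    //              IdleGlobalGC() runs instead (absent disposed contexts).
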
@@ -4911,13 +4949,7 @@ bool Heap::IdleGlobalGC() {
}
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
- if (contexts_disposed_ > 0) {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: contexts disposed");
- } else {
- CollectGarbage(NEW_SPACE, "idle notification");
- }
+ CollectGarbage(NEW_SPACE, "idle notification");
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
@@ -4936,23 +4968,6 @@ bool Heap::IdleGlobalGC() {
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
finished = true;
- } else if (contexts_disposed_ > 0) {
- if (FLAG_expose_gc) {
- contexts_disposed_ = 0;
- } else {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: contexts disposed");
- last_idle_notification_gc_count_ = gc_count_;
- }
- // If this is the first idle notification, we reset the
- // notification count to avoid letting idle notifications for
- // context disposal garbage collections start a potentially too
- // aggressive idle GC cycle.
- if (number_idle_notifications_ <= 1) {
- number_idle_notifications_ = 0;
- uncommit = false;
- }
} else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
// If we have received more than kIdlesBeforeMarkCompact idle
// notifications we do not perform any cleanup because we don't
@@ -4960,11 +4975,6 @@ bool Heap::IdleGlobalGC() {
finished = true;
}
- // Make sure that we have no pending context disposals and
- // conditionally uncommit from space.
- // Take into account that we might have decided to delay full collection
- // because incremental marking is in progress.
- ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
if (uncommit) UncommitFromSpace();
return finished;
@@ -5612,15 +5622,15 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->end_marker = HeapStats::kEndMarker;
*stats->new_space_size = new_space_.SizeAsInt();
*stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
- *stats->old_pointer_space_size = old_pointer_space_->Size();
+ *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
*stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
- *stats->old_data_space_size = old_data_space_->Size();
+ *stats->old_data_space_size = old_data_space_->SizeOfObjects();
*stats->old_data_space_capacity = old_data_space_->Capacity();
- *stats->code_space_size = code_space_->Size();
+ *stats->code_space_size = code_space_->SizeOfObjects();
*stats->code_space_capacity = code_space_->Capacity();
- *stats->map_space_size = map_space_->Size();
+ *stats->map_space_size = map_space_->SizeOfObjects();
*stats->map_space_capacity = map_space_->Capacity();
- *stats->cell_space_size = cell_space_->Size();
+ *stats->cell_space_size = cell_space_->SizeOfObjects();
*stats->cell_space_capacity = cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
@@ -5855,7 +5865,7 @@ bool Heap::SetUp(bool create_heap_objects) {
if (!ConfigureHeapDefault()) return false;
}
- gc_initializer_mutex->Lock();
+ gc_initializer_mutex.Pointer()->Lock();
static bool initialized_gc = false;
if (!initialized_gc) {
initialized_gc = true;
@@ -5863,7 +5873,7 @@ bool Heap::SetUp(bool create_heap_objects) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
}
- gc_initializer_mutex->Unlock();
+ gc_initializer_mutex.Pointer()->Unlock();
MarkMapPointersAsEncoded(false);
@@ -6958,4 +6968,19 @@ void Heap::FreeQueuedChunks() {
chunks_queued_for_free_ = NULL;
}
+
+void Heap::RememberUnmappedPage(Address page, bool compacted) {
+ uintptr_t p = reinterpret_cast<uintptr_t>(page);
+ // Tag the page pointer to make it findable in the dump file.
+ if (compacted) {
+ p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
+ } else {
+ p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
+ }
+ remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
+ reinterpret_cast<Address>(p);
+ remembered_unmapped_pages_index_++;
+ remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
+}
+
} } // namespace v8::internal
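
RememberUnmappedPage keeps a small ring buffer of recently unmapped page
addresses for post-mortem debugging. XOR-ing a recognizable hex word
("c1ead" for cleared, "1d1ed" for died) into the low, page-offset bits makes
entries easy to spot in a crash dump, while the page-aligned high bits still
identify the page. Since pages are aligned, the tag can be stripped again;
a sketch of reading an entry back:

    // tagged = page ^ (0xc1ead & (Page::kPageSize - 1));
    // page   = tagged & ~(Page::kPageSize - 1);  // high bits untouched
    // marker = tagged &  (Page::kPageSize - 1);  // low bits carry the tag
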
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index df3717e4fa..2bd037f15b 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -77,7 +77,6 @@ namespace internal {
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
- V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The first 32 roots above this line should be boring from a GC point of */ \
/* view. This means they are never in new space and never on a page that */ \
@@ -1481,6 +1480,13 @@ class Heap {
void ClearNormalizedMapCaches();
+ // Clears the cache of ICs related to this map.
+ void ClearCacheOnMap(Map* map) {
+ if (FLAG_cleanup_code_caches_at_gc) {
+ map->ClearCodeCache(this);
+ }
+ }
+
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
@@ -1583,6 +1589,19 @@ class Heap {
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
+  // For post-mortem debugging.
+ void RememberUnmappedPage(Address page, bool compacted);
+
+ // Global inline caching age: it is incremented on some GCs after context
+ // disposal. We use it to flush inline caches.
+ int global_ic_age() {
+ return global_ic_age_;
+ }
+
+ void AgeInlineCaches() {
+ ++global_ic_age_;
+ }
+
private:
Heap();
@@ -1610,6 +1629,8 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
+ int global_ic_age_;
+
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
@@ -1634,6 +1655,11 @@ class Heap {
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
+  // For post-mortem debugging.
+ static const int kRememberedUnmappedPages = 128;
+ int remembered_unmapped_pages_index_;
+ Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+
// Total length of the strings we failed to flatten since the last GC.
int unflattened_strings_length_;
@@ -1781,7 +1807,6 @@ class Heap {
inline void UpdateOldSpaceLimits();
-
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@@ -1960,9 +1985,24 @@ class Heap {
return incremental_marking()->WorthActivating();
}
+  // Estimates how many milliseconds a Mark-Sweep would take to complete.
+  // In the idle notification handler we assume that this function returns:
+  // - a number less than 10 for small heaps, which are less than 8 MB.
+  // - a number greater than 10 for large heaps, which are greater than 32 MB.
+ int TimeMarkSweepWouldTakeInMs() {
+ // Rough estimate of how many megabytes of heap can be processed in 1 ms.
+ static const int kMbPerMs = 2;
+
+ int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
+ return heap_size_mb / kMbPerMs;
+ }
+
// Returns true if no more GC work is left.
bool IdleGlobalGC();
+ void AdvanceIdleIncrementalMarking(intptr_t step_size);
+
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
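
TimeMarkSweepWouldTakeInMs is deliberately crude. Working the 2 MB/ms estimate
against the comment's own thresholds:

    // SizeOfObjects() =  8 MB ->  8 / 2 =  4 ms  (small heap, below 10)
    // SizeOfObjects() = 32 MB -> 32 / 2 = 16 ms  (large heap, above 10)
    // IdleNotification compares Min(estimate, 1000) against the hint to
    // choose between a full collection and an incremental-marking step.
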
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index f7391dd33d..f698da46d4 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -885,6 +885,15 @@ HValue* HChange::Canonicalize() {
}
+HValue* HWrapReceiver::Canonicalize() {
+ if (HasNoUses()) return NULL;
+ if (receiver()->type().IsJSObject()) {
+ return receiver();
+ }
+ return this;
+}
+
+
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -2248,6 +2257,46 @@ void HIn::PrintDataTo(StringStream* stream) {
}
+Representation HPhi::InferredRepresentation() {
+ bool double_occurred = false;
+ bool int32_occurred = false;
+ for (int i = 0; i < OperandCount(); ++i) {
+ HValue* value = OperandAt(i);
+ if (value->IsUnknownOSRValue()) {
+ HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
+ if (hint_value != NULL) {
+ Representation hint = hint_value->representation();
+ if (hint.IsDouble()) double_occurred = true;
+ if (hint.IsInteger32()) int32_occurred = true;
+ }
+ continue;
+ }
+ if (value->representation().IsDouble()) double_occurred = true;
+ if (value->representation().IsInteger32()) int32_occurred = true;
+ if (value->representation().IsTagged()) {
+ if (value->IsConstant()) {
+ HConstant* constant = HConstant::cast(value);
+ if (constant->IsConvertibleToInteger()) {
+ int32_occurred = true;
+ } else if (constant->HasNumberValue()) {
+ double_occurred = true;
+ } else {
+ return Representation::Tagged();
+ }
+ } else {
+ return Representation::Tagged();
+ }
+ }
+ }
+
+ if (double_occurred) return Representation::Double();
+
+ if (int32_occurred) return Representation::Integer32();
+
+ return Representation::None();
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
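
The expanded HPhi::InferredRepresentation lets OSR phis borrow a
representation hint from their eventual incoming value instead of
pessimistically staying tagged. Illustrative cases, as I read the lattice
above (not quoted from V8 documentation):

    // operand representations            -> inferred phi representation
    // {int32, int32}                     -> Integer32
    // {int32, double}                    -> Double    (double dominates)
    // {tagged constant 5, int32}         -> Integer32 (convertible constant)
    // {tagged non-number}                -> Tagged    (early return)
    // {UnknownOSRValue with a hint phi}  -> the hint's representation counts
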
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 5733e51aff..fb5879fd90 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -185,7 +185,8 @@ class LChunkBuilder;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
- V(DateField)
+ V(DateField) \
+ V(WrapReceiver)
#define GVN_FLAG_LIST(V) \
V(Calls) \
@@ -2260,20 +2261,7 @@ class HPhi: public HValue {
SetFlag(kFlexibleRepresentation);
}
- virtual Representation InferredRepresentation() {
- bool double_occurred = false;
- bool int32_occurred = false;
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- if (value->representation().IsDouble()) double_occurred = true;
- if (value->representation().IsInteger32()) int32_occurred = true;
- if (value->representation().IsTagged()) return Representation::Tagged();
- }
-
- if (double_occurred) return Representation::Double();
- if (int32_occurred) return Representation::Integer32();
- return Representation::None();
- }
+ virtual Representation InferredRepresentation();
virtual Range* InferRange(Zone* zone);
virtual Representation RequiredInputRepresentation(int index) {
@@ -2503,6 +2491,27 @@ class HBinaryOperation: public HTemplateInstruction<3> {
};
+class HWrapReceiver: public HTemplateInstruction<2> {
+ public:
+ HWrapReceiver(HValue* receiver, HValue* function) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, receiver);
+ SetOperandAt(1, function);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ HValue* receiver() { return OperandAt(0); }
+ HValue* function() { return OperandAt(1); }
+
+ virtual HValue* Canonicalize();
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
+};
+
+
class HApplyArguments: public HTemplateInstruction<4> {
public:
HApplyArguments(HValue* function,
@@ -3414,13 +3423,27 @@ class HCallStub: public HUnaryCall {
class HUnknownOSRValue: public HTemplateInstruction<0> {
public:
- HUnknownOSRValue() { set_representation(Representation::Tagged()); }
+ HUnknownOSRValue()
+ : incoming_value_(NULL) {
+ set_representation(Representation::Tagged());
+ }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ void set_incoming_value(HPhi* value) {
+ incoming_value_ = value;
+ }
+
+ HPhi* incoming_value() {
+ return incoming_value_;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
+
+ private:
+ HPhi* incoming_value_;
};
@@ -4284,7 +4307,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone) {
- return new(zone) Range(0, String::kMaxUC16CharCode);
+ return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
};
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 11b1157179..9b77408232 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -1766,6 +1766,12 @@ void HInferRepresentation::InferBasedOnInputs(HValue* current) {
ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
Representation inferred = current->InferredRepresentation();
if (inferred.IsSpecialization()) {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d representation %s -> %s based on inputs\n",
+ current->id(),
+ r.Mnemonic(),
+ inferred.Mnemonic());
+ }
current->ChangeRepresentation(inferred);
AddDependantsToWorklist(current);
}
@@ -1793,6 +1799,12 @@ void HInferRepresentation::InferBasedOnUses(HValue* value) {
Representation new_rep = TryChange(value);
if (!new_rep.IsNone()) {
if (!value->representation().Equals(new_rep)) {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d representation %s -> %s based on uses\n",
+ value->id(),
+ r.Mnemonic(),
+ new_rep.Mnemonic());
+ }
value->ChangeRepresentation(new_rep);
AddDependantsToWorklist(value);
}
@@ -2508,6 +2520,14 @@ HGraph* HGraphBuilder::CreateGraph() {
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
graph()->CollectPhis();
+ if (graph()->has_osr_loop_entry()) {
+ const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ HPhi* phi = phis->at(j);
+ graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
+ }
+ }
+
HInferRepresentation rep(graph());
rep.Analyze();
@@ -2613,6 +2633,10 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
AddInstruction(undefined_constant);
graph_->set_undefined_constant(undefined_constant);
+ HArgumentsObject* object = new(zone()) HArgumentsObject;
+ AddInstruction(object);
+ graph()->SetArgumentsObject(object);
+
// Set the initial values of parameters including "this". "This" has
// parameter index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
@@ -2639,10 +2663,9 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
if (!scope->arguments()->IsStackAllocated()) {
return Bailout("context-allocated arguments");
}
- HArgumentsObject* object = new(zone()) HArgumentsObject;
- AddInstruction(object);
- graph()->SetArgumentsObject(object);
- environment()->Bind(scope->arguments(), object);
+
+ environment()->Bind(scope->arguments(),
+ graph()->GetArgumentsObject());
}
}
@@ -3077,8 +3100,8 @@ bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
}
-void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
- if (!HasOsrEntryAt(statement)) return;
+bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+ if (!HasOsrEntryAt(statement)) return false;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
@@ -3093,10 +3116,14 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
int osr_entry_id = statement->OsrEntryId();
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
+ ZoneList<HUnknownOSRValue*>* osr_values =
+ new(zone()) ZoneList<HUnknownOSRValue*>(length);
+
for (int i = 0; i < first_expression_index; ++i) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Bind(i, osr_value);
+ osr_values->Add(osr_value);
}
if (first_expression_index != length) {
@@ -3105,9 +3132,12 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Push(osr_value);
+ osr_values->Add(osr_value);
}
}
+ graph()->set_osr_values(osr_values);
+
AddSimulate(osr_entry_id);
AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
HContext* context = new(zone()) HContext;
@@ -3116,6 +3146,7 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
current_block()->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
set_current_block(loop_predecessor);
+ return true;
}
@@ -3139,10 +3170,11 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -3181,10 +3213,12 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@@ -3226,10 +3260,11 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@@ -3321,10 +3356,11 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
@@ -3639,22 +3675,27 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
- if (!boilerplate->HasFastElements()) return false;
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value = JSObject::GetElement(boilerplate, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object,
- max_depth - 1,
- max_properties,
- total_size)) {
- return false;
+ if (boilerplate->HasFastDoubleElements()) {
+ *total_size += FixedDoubleArray::SizeFor(elements->length());
+ } else if (boilerplate->HasFastElements()) {
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value = JSObject::GetElement(boilerplate, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteral(value_object,
+ max_depth - 1,
+ max_properties,
+ total_size)) {
+ return false;
+ }
}
}
+ *total_size += FixedArray::SizeFor(length);
+ } else {
+ return false;
}
- *total_size += FixedArray::SizeFor(length);
}
Handle<FixedArray> properties(boilerplate->properties());
@@ -3734,18 +3775,12 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
if (property->emit_store()) {
+ property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
- Handle<String> name = Handle<String>::cast(key->handle());
- HStoreNamedGeneric* store =
- new(zone()) HStoreNamedGeneric(
- context,
- literal,
- name,
- value,
- function_strict_mode_flag());
+ HInstruction* store = BuildStoreNamed(literal, value, property);
AddInstruction(store);
- AddSimulate(key->id());
+ if (store->HasObservableSideEffects()) AddSimulate(key->id());
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@@ -3950,6 +3985,25 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
HValue* value,
+ ObjectLiteral::Property* prop) {
+ Literal* key = prop->key()->AsLiteral();
+ Handle<String> name = Handle<String>::cast(key->handle());
+ ASSERT(!name.is_null());
+
+ LookupResult lookup(isolate());
+ Handle<Map> type = prop->GetReceiverType();
+ bool is_monomorphic = prop->IsMonomorphic() &&
+ ComputeStoredField(type, name, &lookup);
+
+ return is_monomorphic
+ ? BuildStoreNamedField(object, name, value, type, &lookup,
+ true) // Needs smi and map check.
+ : BuildStoreNamedGeneric(object, name, value);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
+ HValue* value,
Expression* expr) {
Property* prop = (expr->AsProperty() != NULL)
? expr->AsProperty()
@@ -4474,6 +4528,10 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
Property* expr) {
+ if (expr->IsUninitialized() && !FLAG_always_opt) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ }
ASSERT(expr->key()->IsPropertyName());
Handle<Object> name = expr->key()->AsLiteral()->handle();
HValue* context = environment()->LookupContext();
@@ -5226,10 +5284,21 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
return false;
}
- // Don't inline functions that uses the arguments object.
+  // If the function uses the arguments object, check that inlining of
+  // functions with an arguments object is enabled and that the arguments
+  // variable is stack-allocated.
if (function->scope()->arguments() != NULL) {
- TraceInline(target, caller, "target requires special argument handling");
- return false;
+ if (!FLAG_inline_arguments) {
+ TraceInline(target, caller, "target uses arguments object");
+ return false;
+ }
+
+ if (!function->scope()->arguments()->IsStackAllocated()) {
+ TraceInline(target,
+ caller,
+ "target uses non-stackallocated arguments object");
+ return false;
+ }
}
// All declarations must be inlineable.
@@ -5307,6 +5376,12 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
function,
call_kind,
function_state()->is_construct()));
+  // If the function uses the arguments object, create and bind one.
+ if (function->scope()->arguments() != NULL) {
+ ASSERT(function->scope()->arguments()->IsStackAllocated());
+ environment()->Bind(function->scope()->arguments(),
+ graph()->GetArgumentsObject());
+ }
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
if (HasStackOverflow()) {
@@ -5645,13 +5720,6 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
HValue* arg_two_value = environment()->Lookup(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
- // Our implementation of arguments (based on this stack frame or an
- // adapter below it) does not work for inlined functions.
- if (function_state()->outer() != NULL) {
- Bailout("Function.prototype.apply optimization in inlined function");
- return true;
- }
-
// Found pattern f.apply(receiver, arguments).
VisitForValue(prop->obj());
if (HasStackOverflow() || current_block() == NULL) return true;
@@ -5662,13 +5730,46 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
VisitForValue(args->at(0));
if (HasStackOverflow() || current_block() == NULL) return true;
HValue* receiver = Pop();
- HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
- HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
- HInstruction* result =
- new(zone()) HApplyArguments(function, receiver, length, elements);
- result->set_position(expr->position());
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
+
+ if (function_state()->outer() == NULL) {
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ HInstruction* length =
+ AddInstruction(new(zone()) HArgumentsLength(elements));
+ HValue* wrapped_receiver =
+ AddInstruction(new(zone()) HWrapReceiver(receiver, function));
+ HInstruction* result =
+ new(zone()) HApplyArguments(function,
+ wrapped_receiver,
+ length,
+ elements);
+ result->set_position(expr->position());
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ } else {
+    // We are inside an inlined function and know exactly what is inside
+    // the arguments object.
+ HValue* context = environment()->LookupContext();
+
+ HValue* wrapped_receiver =
+ AddInstruction(new(zone()) HWrapReceiver(receiver, function));
+ PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
+
+ HEnvironment* arguments_env = environment()->arguments_environment();
+
+ int parameter_count = arguments_env->parameter_count();
+ for (int i = 1; i < arguments_env->parameter_count(); i++) {
+ PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
+ }
+
+ HInvokeFunction* call = new(zone()) HInvokeFunction(
+ context,
+ function,
+ parameter_count);
+ Drop(parameter_count);
+ call->set_position(expr->position());
+ ast_context()->ReturnInstruction(call, expr->id());
+ return true;
+ }
}
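
TryCallApply previously bailed out of f.apply(receiver, arguments) inside
inlined functions; now the builder exploits the fact that an inlined frame's
HEnvironment records exactly what the arguments object would contain. A
sketch of the emitted sequence for an inlined callee with parameters
(this, a, b), per the code above:

    // HWrapReceiver(receiver, function)   // receiver coercion
    // HPushArgument(wrapped_receiver)
    // HPushArgument(a); HPushArgument(b)  // parameters 1 .. n-1
    // HInvokeFunction(context, function, parameter_count)
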
@@ -6654,6 +6755,15 @@ static bool IsLiteralCompareNil(HValue* left,
}
+static bool IsLiteralCompareBool(HValue* left,
+ Token::Value op,
+ HValue* right) {
+ return op == Token::EQ_STRICT &&
+ ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
+ (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+}
+
+
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -6701,6 +6811,12 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
+ if (IsLiteralCompareBool(left, op, right)) {
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ }
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index b0d67ebb66..e2779bb226 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -293,7 +293,6 @@ class HGraph: public ZoneObject {
HArgumentsObject* GetArgumentsObject() const {
return arguments_object_.get();
}
- bool HasArgumentsObject() const { return arguments_object_.is_set(); }
void SetArgumentsObject(HArgumentsObject* object) {
arguments_object_.set(object);
@@ -314,6 +313,26 @@ class HGraph: public ZoneObject {
void Verify(bool do_full_verify) const;
#endif
+ bool has_osr_loop_entry() {
+ return osr_loop_entry_.is_set();
+ }
+
+ HBasicBlock* osr_loop_entry() {
+ return osr_loop_entry_.get();
+ }
+
+ void set_osr_loop_entry(HBasicBlock* entry) {
+ osr_loop_entry_.set(entry);
+ }
+
+ ZoneList<HUnknownOSRValue*>* osr_values() {
+ return osr_values_.get();
+ }
+
+ void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
+ osr_values_.set(values);
+ }
+
private:
void Postorder(HBasicBlock* block,
BitVector* visited,
@@ -354,6 +373,9 @@ class HGraph: public ZoneObject {
SetOncePointer<HConstant> constant_hole_;
SetOncePointer<HArgumentsObject> arguments_object_;
+ SetOncePointer<HBasicBlock> osr_loop_entry_;
+ SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -378,6 +400,10 @@ class HEnvironment: public ZoneObject {
return outer;
}
+ HEnvironment* arguments_environment() {
+ return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
+ }
+
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
@@ -887,7 +913,7 @@ class HGraphBuilder: public AstVisitor {
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
- void PreProcessOsrEntry(IterationStatement* statement);
+ bool PreProcessOsrEntry(IterationStatement* statement);
// True iff. we are compiling for OSR and the statement is the entry.
bool HasOsrEntryAt(IterationStatement* statement);
void VisitLoopBody(IterationStatement* stmt,
@@ -1077,6 +1103,9 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildStoreNamed(HValue* object,
HValue* value,
Expression* expr);
+ HInstruction* BuildStoreNamed(HValue* object,
+ HValue* value,
+ ObjectLiteral::Property* prop);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index ef109229a2..3cf0d005e9 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -88,7 +88,7 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return Assembler::kSpecialTargetSize;
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index e5ae475e71..929b485ebf 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -97,16 +97,25 @@ struct Register {
int code_;
};
-
-const Register eax = { 0 };
-const Register ecx = { 1 };
-const Register edx = { 2 };
-const Register ebx = { 3 };
-const Register esp = { 4 };
-const Register ebp = { 5 };
-const Register esi = { 6 };
-const Register edi = { 7 };
-const Register no_reg = { -1 };
+const int kRegister_eax_Code = 0;
+const int kRegister_ecx_Code = 1;
+const int kRegister_edx_Code = 2;
+const int kRegister_ebx_Code = 3;
+const int kRegister_esp_Code = 4;
+const int kRegister_ebp_Code = 5;
+const int kRegister_esi_Code = 6;
+const int kRegister_edi_Code = 7;
+const int kRegister_no_reg_Code = -1;
+
+const Register eax = { kRegister_eax_Code };
+const Register ecx = { kRegister_ecx_Code };
+const Register edx = { kRegister_edx_Code };
+const Register ebx = { kRegister_ebx_Code };
+const Register esp = { kRegister_esp_Code };
+const Register ebp = { kRegister_ebp_Code };
+const Register esi = { kRegister_esi_Code };
+const Register edi = { kRegister_edi_Code };
+const Register no_reg = { kRegister_no_reg_Code };
inline const char* Register::AllocationIndexToString(int index) {
@@ -589,8 +598,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@@ -601,8 +610,7 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
+ static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index d3e2a919ca..4faa6a4b24 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -7024,44 +7024,47 @@ struct AheadOfTimeWriteBarrierStubList {
};
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { ebx, eax, edi, EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
- { ebx, ecx, edx, EMIT_REMEMBERED_SET },
- { ebx, edi, edx, OMIT_REMEMBERED_SET },
+ { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal and CallFunctionStub.
- { ebx, ecx, edx, OMIT_REMEMBERED_SET },
+ { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { edx, ecx, ebx, EMIT_REMEMBERED_SET },
+ { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
// GenerateStoreField calls the stub with two different permutations of
// registers. This is the second.
- { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
// StoreIC::GenerateNormal via GenerateDictionaryStore
- { ebx, edi, edx, EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
// KeyedStoreIC::GenerateGeneric.
- { ebx, edx, ecx, EMIT_REMEMBERED_SET},
+ { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { edi, ebx, ecx, EMIT_REMEMBERED_SET},
- { edx, edi, ebx, EMIT_REMEMBERED_SET},
+ { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
+ { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { edx, ebx, edi, EMIT_REMEMBERED_SET},
- { edx, ebx, edi, OMIT_REMEMBERED_SET},
+ { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
+ { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
- { eax, edx, esi, EMIT_REMEMBERED_SET},
- { edx, eax, edi, EMIT_REMEMBERED_SET},
+ { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
+ { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
- { ebx, eax, ecx, EMIT_REMEMBERED_SET},
+ { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7089,7 +7092,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
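
The REG(Name) macro exists because the table is now static const: a constant aggregate initializer must be built from constant expressions, which the kRegister_*_Code integers are, while the plain const Register globals are not. A reduced sketch of the expansion, with Entry standing in for AheadOfTimeWriteBarrierStubList:

    struct Register { int code_; };
    const int kRegister_ebx_Code = 3;

    #define REG(Name) { kRegister_ ## Name ## _Code }
    struct Entry { Register object; };
    // REG(ebx) expands to { kRegister_ebx_Code }; `{ ebx }` would not be a
    // constant initializer in C++98.
    static const Entry kTable[] = { REG(ebx) };
    #undef REG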
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index de6901f9cd..ea61910322 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -57,8 +57,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-TranscendentalFunction CreateTranscendentalFunction(
- TranscendentalCache::Type type) {
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
@@ -99,7 +98,40 @@ TranscendentalFunction CreateTranscendentalFunction(
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<TranscendentalFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+  // If SSE2 is not available, we can use libc's implementation to ensure
+  // consistency, since code generated by fullcodegen calls into the runtime
+  // in that case.
+ if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // esp[1 * kPointerSize]: raw double input
+ // esp[0 * kPointerSize]: return address
+ // Move double input into registers.
+ {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ sqrtsd(xmm0, xmm0);
+ __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ // Load result into floating point register as return value.
+ __ fld_d(Operand(esp, 1 * kPointerSize));
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(desc.reloc_size == 0);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
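
UnaryMathFunction is a plain double (*)(double) pointer (per codegen.h), so the generated stub is callable like any C function, and the factory degrades to libc's sqrt when SSE2 is absent. A hypothetical call site; the SqrtTwo wrapper is illustrative only:

    typedef double (*UnaryMathFunction)(double x);  // as in codegen.h
    UnaryMathFunction CreateSqrtFunction();         // defined above

    double SqrtTwo() {
      static UnaryMathFunction fast_sqrt = CreateSqrtFunction();
      return fast_sqrt(2.0);  // ~1.41421356; sqrtsd when SSE2 is available
    }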
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 11de1c4a78..92d7cc1c2c 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -427,14 +427,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & 0x4) == 0) {
- // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
- output_[0]->SetRegister(ebp.code(), frame_pointer);
+ output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@@ -692,11 +685,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -747,9 +738,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
- == fp_value);
+ ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@@ -939,17 +928,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
- // If frame was dynamically aligned, pop padding.
- Label sentinel, sentinel_done;
- __ pop(ecx);
- __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ j(equal, &sentinel);
- __ push(ecx);
- __ jmp(&sentinel_done);
- __ bind(&sentinel);
- __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(1));
- __ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
@@ -961,17 +939,6 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
- if (type() == OSR) {
- // If alignment padding is added, push the sentinel.
- Label no_osr_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_osr_padding, Label::kNear);
- __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ bind(&no_osr_padding);
- }
-
-
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index b42ce95f8d..62a2c2af33 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -100,7 +101,9 @@ class JumpPatchSite BASE_EMBEDDED {
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
return 13;
}
@@ -321,12 +324,20 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 100;
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -337,7 +348,8 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(127, Max(1, distance / 100));
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -398,7 +410,8 @@ void FullCodeGenerator::EmitReturnSequence() {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
- weight = Min(127, Max(1, distance / 100));
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
Label ok;
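
With the named constants, the back-edge weight is distance / 100 clamped to [1, 127], so larger loop bodies drain the profiling counter faster. A self-contained sketch of the clamp with a few worked values:

    static int Min(int a, int b) { return a < b ? a : b; }
    static int Max(int a, int b) { return a > b ? a : b; }
    static const int kMaxBackEdgeWeight = 127;
    static const int kBackEdgeDistanceDivisor = 100;

    // distance =    40  ->  Max(1, 0)     -> 1    (tiny body, slow drain)
    // distance =  2500  ->  25                    (mid-sized body)
    // distance = 20000  ->  Min(127, 200) -> 127  (huge body, fastest drain)
    int BackEdgeWeight(int distance) {
      return Min(kMaxBackEdgeWeight,
                 Max(1, distance / kBackEdgeDistanceDivisor));
    }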
@@ -1411,6 +1424,15 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ push(Immediate(isolate()->factory()->null_value()));
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1445,6 +1467,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1456,6 +1479,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
@@ -1487,24 +1512,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
- case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ push(Immediate(isolate()->factory()->null_value()));
- } else {
- __ push(Immediate(isolate()->factory()->null_value()));
- VisitForStackValue(value);
- }
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
break;
- default: UNREACHABLE();
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(esp, 0));
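
Batching through the accessor table means a literal such as { get x() {...}, set x(v) {...} } now costs one kDefineOrRedefineAccessorProperty call instead of two. A rough model of the pairing, with std::map standing in for V8's AccessorTable:

    #include <cstddef>
    #include <map>
    #include <string>

    struct Accessors { const void* getter; const void* setter; };

    // Visiting the properties fills the table; a getter and a setter for
    // the same key land in the same entry.
    void Record(std::map<std::string, Accessors>* table,
                const std::string& key,
                const void* getter, const void* setter) {
      Accessors* entry = &(*table)[key];
      if (getter != NULL) entry->getter = getter;
      if (setter != NULL) entry->setter = setter;
    }
    // A final pass over the table then emits one runtime call per entry.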
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index a656175af8..8fb4c79196 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -79,9 +79,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
- info()->osr_ast_id() != AstNode::kNoNumber;
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -156,29 +153,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
- if (dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0),
- Immediate(isolate()->factory()->frame_alignment_marker()));
-
- __ bind(&do_not_pad);
- }
-
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -579,7 +553,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -2125,17 +2098,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
- if (dynamic_frame_alignment_) {
- Label aligned;
- // Frame alignment marker (padding) is below arguments,
- // and receiver, so its return-address-relative offset is
- // (num_arguments + 2) words.
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(factory()->frame_alignment_marker()));
- __ j(not_equal, &aligned);
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
- __ bind(&aligned);
- }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@@ -2625,15 +2587,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
Register scratch = ToRegister(instr->TempAt(0));
- ASSERT(receiver.is(eax)); // Used for parameter count.
- ASSERT(function.is(edi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(eax));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2675,6 +2632,17 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ mov(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(eax)); // Used for parameter count.
+ ASSERT(function.is(edi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(eax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
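
Splitting DoWrapReceiver out of DoApplyArguments lets the receiver-coercion code be emitted on its own, which is what the HWrapReceiver instruction added to the apply-arguments inlining needs. A small model of the classic-mode rule it implements (the enum and stand-in types are assumptions of the sketch):

    enum Kind { NULL_OR_UNDEFINED, JS_OBJECT, PRIMITIVE };
    struct Value { Kind kind; };

    Value WrapReceiver(Value receiver, Value global_receiver) {
      if (receiver.kind == NULL_OR_UNDEFINED) return global_receiver;
      return receiver;  // objects pass through; primitive receivers bail
                        // out to the runtime via the deopt environment
    }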
@@ -4493,33 +4461,47 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
- // Copy elements backing store header.
- ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
+ // Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ mov(ecx, FieldOperand(source, i));
__ mov(FieldOperand(result, elements_offset + i), ecx);
}
- }
- // Copy elements backing store content.
- ASSERT(!has_elements || elements->IsFixedArray());
- int elements_length = has_elements ? elements->length() : 0;
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
+ // Copy elements backing store content.
+ int elements_length = elements->length();
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ int32_t value_low = value & 0xFFFFFFFF;
+ int32_t value_high = value >> 32;
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(FieldOperand(result, total_offset), Immediate(value_low));
+ __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(ecx, Operand(result, *offset));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ } else {
+ __ mov(FieldOperand(result, total_offset), Immediate(value));
+ }
+ }
} else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
+ UNREACHABLE();
}
}
}
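
For double arrays the copier stores each element's raw 64-bit representation as two 32-bit immediates, avoiding a boxed HeapNumber per element. A worked example of the split for the value 1.0, whose IEEE-754 bit pattern is 0x3FF0000000000000:

    #include <stdint.h>

    void SplitDoubleExample() {
      int64_t value = 0x3FF0000000000000LL;  // what get_representation(i) yields
      int32_t value_low  = static_cast<int32_t>(value & 0xFFFFFFFF);  // 0x00000000
      int32_t value_high = static_cast<int32_t>(value >> 32);         // 0x3FF00000
      (void)value_low;   // stored at total_offset
      (void)value_high;  // stored at total_offset + 4
    }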
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 481a2ae7a5..52befc6974 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -58,7 +58,6 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
@@ -145,10 +144,6 @@ class LCodeGen BASE_EMBEDDED {
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
- void set_dynamic_frame_alignment(bool value) {
- dynamic_frame_alignment_ = value;
- }
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -333,7 +328,6 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_;
Scope* const scope_;
Status status_;
- bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 223fde2be5..2bfbb67bce 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -368,11 +368,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if for a double-width slot.
- if (is_double) {
- spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
- spill_slot_count_++;
- num_double_slots_++;
- }
+ if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
@@ -1111,17 +1107,25 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LOperand* temp = TempRegister();
+ LWrapReceiver* result =
+ new(zone()) LWrapReceiver(receiver, function, temp);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), edi);
LOperand* receiver = UseFixed(instr->receiver(), eax);
LOperand* length = UseFixed(instr->length(), ebx);
LOperand* elements = UseFixed(instr->elements(), ecx);
- LOperand* temp = FixedTemp(edx);
LApplyArguments* result = new(zone()) LApplyArguments(function,
receiver,
length,
- elements,
- temp);
+ elements);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index dd41bfbc77..4ecce96d0f 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -173,7 +173,8 @@ class LCodeGen;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
- V(DateField)
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -456,18 +457,33 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
+class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LWrapReceiver(LOperand* receiver,
+ LOperand* function,
+ LOperand* temp) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements,
- LOperand* temp) {
+ LOperand* elements) {
inputs_[0] = function;
inputs_[1] = receiver;
inputs_[2] = length;
inputs_[3] = elements;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@@ -2273,7 +2289,6 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
- num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2287,8 +2302,6 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
- int num_double_slots() const { return num_double_slots_; }
-
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2330,7 +2343,6 @@ class LChunk: public ZoneObject {
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
- int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 2c9b60c868..04d6b62c80 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -523,7 +523,7 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(eax, Operand(current_character(), -minus));
__ and_(eax, mask);
__ cmp(eax, c);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index de71818c98..fd267798cf 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1245,14 +1245,9 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(edx, miss);
- }
// Check that the maps haven't changed.
+ __ JumpIfSmi(edx, miss);
CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
}
@@ -2829,14 +2824,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual loads. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(eax, &miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(eax, &miss);
CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
// Get the value from the cell.
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index c2ee45ec1e..c7621277db 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -1017,6 +1017,15 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(*code);
} else if (state == MONOMORPHIC) {
+    // We are transitioning from the monomorphic to the megamorphic case.
+    // Place both the current monomorphic stub and the stub compiled for
+    // the receiver into the stub cache.
+ Map* map = target()->FindFirstMap();
+ if (map != NULL) {
+ isolate()->stub_cache()->Set(*name, map, target());
+ }
+ isolate()->stub_cache()->Set(*name, receiver->map(), *code);
+
set_target(*megamorphic_stub());
} else if (state == MEGAMORPHIC) {
// Cache code holding map should be consistent with
@@ -1365,7 +1374,7 @@ MaybeObject* StoreIC::Store(State state,
// Strict mode doesn't allow setting non-existent global property
// or an assignment to a read only property.
if (strict_mode == kStrictMode) {
- if (lookup.IsFound() && lookup.IsReadOnly()) {
+ if (lookup.IsProperty() && lookup.IsReadOnly()) {
return TypeError("strict_read_only_property", object, name);
} else if (IsContextual(object)) {
return ReferenceError("not_defined", name);
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index d0346171d3..8fe89b4a98 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -178,7 +178,12 @@ class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+ && (target->ic_age() != heap_->global_ic_age())) {
+ IC::Clear(rinfo->pc());
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ }
heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
MarkObject(target);
}
@@ -396,7 +401,7 @@ bool IncrementalMarking::WorthActivating() {
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
!Serializer::enabled() &&
- heap_->PromotedSpaceSize() > kActivationThreshold;
+ heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -795,6 +800,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
Map* map = obj->map();
if (map == filler_map) continue;
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
+ heap_->ClearCacheOnMap(map);
+ }
+
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 0a2c17404e..9fb16fbe96 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -49,6 +49,16 @@ SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
}
+bool Isolate::IsDebuggerActive() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (!NoBarrier_Load(&debugger_initialized_)) return false;
+ return debugger()->IsDebuggerActive();
+#else
+ return false;
+#endif
+}
+
+
bool Isolate::DebuggerHasBreakPoints() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return debug()->has_break_points();
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 3dfcbb5a3f..625cc56727 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -38,9 +38,11 @@
#include "heap-profiler.h"
#include "hydrogen.h"
#include "isolate.h"
+#include "lazy-instance.h"
#include "lithium-allocator.h"
#include "log.h"
#include "messages.h"
+#include "platform.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
@@ -55,6 +57,31 @@
namespace v8 {
namespace internal {
+struct GlobalState {
+ Thread::LocalStorageKey per_isolate_thread_data_key;
+ Thread::LocalStorageKey isolate_key;
+ Thread::LocalStorageKey thread_id_key;
+ Isolate* default_isolate;
+ Isolate::ThreadDataTable* thread_data_table;
+ Mutex* mutex;
+};
+
+struct InitializeGlobalState {
+ static void Construct(GlobalState* state) {
+ state->isolate_key = Thread::CreateThreadLocalKey();
+ state->thread_id_key = Thread::CreateThreadLocalKey();
+ state->per_isolate_thread_data_key = Thread::CreateThreadLocalKey();
+ state->thread_data_table = new Isolate::ThreadDataTable();
+ state->default_isolate = new Isolate();
+ state->mutex = OS::CreateMutex();
+ // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
+ // because a non-null thread data may be already set.
+    // because non-null thread data may already be set.
+ }
+};
+
+static LazyInstance<GlobalState, InitializeGlobalState>::type global_state;
+
Atomic32 ThreadId::highest_thread_id_ = 0;
int ThreadId::AllocateThreadId() {
@@ -64,10 +91,11 @@ int ThreadId::AllocateThreadId() {
int ThreadId::GetCurrentThreadId() {
- int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+ const GlobalState& global = global_state.Get();
+ int thread_id = Thread::GetThreadLocalInt(global.thread_id_key);
if (thread_id == 0) {
thread_id = AllocateThreadId();
- Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
+ Thread::SetThreadLocalInt(global.thread_id_key, thread_id);
}
return thread_id;
}
@@ -311,44 +339,16 @@ void Isolate::PreallocatedStorageDelete(void* p) {
storage->LinkTo(&free_list_);
}
-
-Isolate* Isolate::default_isolate_ = NULL;
-Thread::LocalStorageKey Isolate::isolate_key_;
-Thread::LocalStorageKey Isolate::thread_id_key_;
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
-Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-
-
-class IsolateInitializer {
- public:
- IsolateInitializer() {
- Isolate::EnsureDefaultIsolate();
- }
-};
-
-static IsolateInitializer* EnsureDefaultIsolateAllocated() {
- // TODO(isolates): Use the system threading API to do this once?
- static IsolateInitializer static_initializer;
- return &static_initializer;
-}
-
-// This variable only needed to trigger static intialization.
-static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
-
-
-
-
-
Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
ThreadId thread_id) {
ASSERT(!thread_id.Equals(ThreadId::Invalid()));
PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
{
- ScopedLock lock(process_wide_mutex_);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
- thread_data_table_->Insert(per_thread);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
+ GlobalState* const global = global_state.Pointer();
+ ScopedLock lock(global->mutex);
+ ASSERT(global->thread_data_table->Lookup(this, thread_id) == NULL);
+ global->thread_data_table->Insert(per_thread);
+ ASSERT(global->thread_data_table->Lookup(this, thread_id) == per_thread);
}
return per_thread;
}
@@ -359,8 +359,9 @@ Isolate::PerIsolateThreadData*
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
- per_thread = thread_data_table_->Lookup(this, thread_id);
+ GlobalState* const global = global_state.Pointer();
+ ScopedLock lock(global->mutex);
+ per_thread = global->thread_data_table->Lookup(this, thread_id);
if (per_thread == NULL) {
per_thread = AllocatePerIsolateThreadData(thread_id);
}
@@ -373,26 +374,25 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
- per_thread = thread_data_table_->Lookup(this, thread_id);
+ GlobalState* const global = global_state.Pointer();
+ ScopedLock lock(global->mutex);
+ per_thread = global->thread_data_table->Lookup(this, thread_id);
}
return per_thread;
}
+bool Isolate::IsDefaultIsolate() const {
+ return this == global_state.Get().default_isolate;
+}
+
+
void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
- if (default_isolate_ == NULL) {
- isolate_key_ = Thread::CreateThreadLocalKey();
- thread_id_key_ = Thread::CreateThreadLocalKey();
- per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
- thread_data_table_ = new Isolate::ThreadDataTable();
- default_isolate_ = new Isolate();
- }
+ GlobalState* const global = global_state.Pointer();
// Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
- // becase a non-null thread data may be already set.
- if (Thread::GetThreadLocal(isolate_key_) == NULL) {
- Thread::SetThreadLocal(isolate_key_, default_isolate_);
+  // because non-null thread data may already be set.
+ if (Thread::GetThreadLocal(global->isolate_key) == NULL) {
+ Thread::SetThreadLocal(global->isolate_key, global->default_isolate);
}
}
@@ -400,32 +400,48 @@ void Isolate::EnsureDefaultIsolate() {
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* Isolate::GetDefaultIsolateDebugger() {
EnsureDefaultIsolate();
- return default_isolate_->debugger();
+ return global_state.Pointer()->default_isolate->debugger();
}
#endif
StackGuard* Isolate::GetDefaultIsolateStackGuard() {
EnsureDefaultIsolate();
- return default_isolate_->stack_guard();
+ return global_state.Pointer()->default_isolate->stack_guard();
+}
+
+
+Thread::LocalStorageKey Isolate::isolate_key() {
+ return global_state.Get().isolate_key;
+}
+
+
+Thread::LocalStorageKey Isolate::thread_id_key() {
+ return global_state.Get().thread_id_key;
+}
+
+
+Thread::LocalStorageKey Isolate::per_isolate_thread_data_key() {
+ return global_state.Get().per_isolate_thread_data_key;
}
void Isolate::EnterDefaultIsolate() {
EnsureDefaultIsolate();
- ASSERT(default_isolate_ != NULL);
+ Isolate* const default_isolate = global_state.Pointer()->default_isolate;
+ ASSERT(default_isolate != NULL);
PerIsolateThreadData* data = CurrentPerIsolateThreadData();
// If not yet in default isolate - enter it.
- if (data == NULL || data->isolate() != default_isolate_) {
- default_isolate_->Enter();
+ if (data == NULL || data->isolate() != default_isolate) {
+ default_isolate->Enter();
}
}
Isolate* Isolate::GetDefaultIsolateForLocking() {
EnsureDefaultIsolate();
- return default_isolate_;
+ return global_state.Pointer()->default_isolate;
}
@@ -1548,8 +1564,8 @@ void Isolate::TearDown() {
Deinit();
- { ScopedLock lock(process_wide_mutex_);
- thread_data_table_->RemoveAllThreads(this);
+ { ScopedLock lock(global_state.Pointer()->mutex);
+ global_state.Pointer()->thread_data_table->RemoveAllThreads(this);
}
if (!IsDefaultIsolate()) {
@@ -1602,8 +1618,9 @@ void Isolate::Deinit() {
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
- Thread::SetThreadLocal(isolate_key_, isolate);
- Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
+ const GlobalState& global = global_state.Get();
+ Thread::SetThreadLocal(global.isolate_key, isolate);
+ Thread::SetThreadLocal(global.per_isolate_thread_data_key, data);
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 2c2618a1f0..0c5a54c598 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -430,19 +430,25 @@ class Isolate {
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
- Thread::GetThreadLocal(per_isolate_thread_data_key_));
+ Thread::GetThreadLocal(per_isolate_thread_data_key()));
}
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
+ const Thread::LocalStorageKey key = isolate_key();
Isolate* isolate = reinterpret_cast<Isolate*>(
- Thread::GetExistingThreadLocal(isolate_key_));
+ Thread::GetExistingThreadLocal(key));
+ if (!isolate) {
+ EnsureDefaultIsolate();
+ isolate = reinterpret_cast<Isolate*>(
+ Thread::GetExistingThreadLocal(key));
+ }
ASSERT(isolate != NULL);
return isolate;
}
INLINE(static Isolate* UncheckedCurrent()) {
- return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+ return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key()));
}
// Usually called by Init(), but can be called early e.g. to allow
@@ -464,7 +470,7 @@ class Isolate {
// for legacy API reasons.
void TearDown();
- bool IsDefaultIsolate() const { return this == default_isolate_; }
+ bool IsDefaultIsolate() const;
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
@@ -489,14 +495,12 @@ class Isolate {
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
- static Thread::LocalStorageKey isolate_key() {
- return isolate_key_;
- }
+ static Thread::LocalStorageKey isolate_key();
// Returns the key used to store process-wide thread IDs.
- static Thread::LocalStorageKey thread_id_key() {
- return thread_id_key_;
- }
+ static Thread::LocalStorageKey thread_id_key();
+
+ static Thread::LocalStorageKey per_isolate_thread_data_key();
// If a client attempts to create a Locker without specifying an isolate,
// we assume that the client is using legacy behavior. Set up the current
@@ -925,6 +929,7 @@ class Isolate {
}
#endif
+ inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
#ifdef DEBUG
@@ -1032,6 +1037,9 @@ class Isolate {
private:
Isolate();
+ friend struct GlobalState;
+ friend struct InitializeGlobalState;
+
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
class ThreadDataTable {
@@ -1074,16 +1082,6 @@ class Isolate {
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
- // This mutex protects highest_thread_id_, thread_data_table_ and
- // default_isolate_.
- static Mutex* process_wide_mutex_;
-
- static Thread::LocalStorageKey per_isolate_thread_data_key_;
- static Thread::LocalStorageKey isolate_key_;
- static Thread::LocalStorageKey thread_id_key_;
- static Isolate* default_isolate_;
- static ThreadDataTable* thread_data_table_;
-
void Deinit();
static void SetIsolateThreadLocals(Isolate* isolate,
@@ -1105,7 +1103,7 @@ class Isolate {
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
-// PreInits and returns a default isolate. Needed when a new thread tries
+ // PreInits and returns a default isolate. Needed when a new thread tries
// to create a Locker for the first time (the lock itself is in the isolate).
static Isolate* GetDefaultIsolateForLocking();
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 7e695bb221..8ccbae49ce 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -1444,7 +1444,7 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
if (ascii) {
char_mask = String::kMaxAsciiCharCode;
} else {
- char_mask = String::kMaxUC16CharCode;
+ char_mask = String::kMaxUtf16CodeUnit;
}
uc16 exor = c1 ^ c2;
// Check whether exor has only one bit set.
@@ -1546,7 +1546,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
if (ascii) {
max_char = String::kMaxAsciiCharCode;
} else {
- max_char = String::kMaxUC16CharCode;
+ max_char = String::kMaxUtf16CodeUnit;
}
Label success;
@@ -1642,7 +1642,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
macro_assembler->CheckCharacterLT(from, on_failure);
}
}
- if (to != String::kMaxUC16CharCode) {
+ if (to != String::kMaxUtf16CodeUnit) {
if (cc->is_negated()) {
macro_assembler->CheckCharacterLT(to + 1, on_failure);
} else {
@@ -1835,7 +1835,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
if (asc) {
char_mask = String::kMaxAsciiCharCode;
} else {
- char_mask = String::kMaxUC16CharCode;
+ char_mask = String::kMaxUtf16CodeUnit;
}
mask_ = 0;
value_ = 0;
@@ -1887,7 +1887,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
if (compiler->ascii()) {
char_mask = String::kMaxAsciiCharCode;
} else {
- char_mask = String::kMaxUC16CharCode;
+ char_mask = String::kMaxUtf16CodeUnit;
}
if ((mask & char_mask) == char_mask) need_mask = false;
mask &= char_mask;
@@ -1939,7 +1939,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
if (compiler->ascii()) {
char_mask = String::kMaxAsciiCharCode;
} else {
- char_mask = String::kMaxUC16CharCode;
+ char_mask = String::kMaxUtf16CodeUnit;
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
@@ -4079,7 +4079,7 @@ static void AddClassNegated(const uc16 *elmv,
int elmc,
ZoneList<CharacterRange>* ranges) {
ASSERT(elmv[0] != 0x0000);
- ASSERT(elmv[elmc-1] != String::kMaxUC16CharCode);
+ ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
ASSERT(last <= elmv[i] - 1);
@@ -4087,7 +4087,7 @@ static void AddClassNegated(const uc16 *elmv,
ranges->Add(CharacterRange(last, elmv[i] - 1));
last = elmv[i + 1] + 1;
}
- ranges->Add(CharacterRange(last, String::kMaxUC16CharCode));
+ ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
}
@@ -4633,8 +4633,8 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
from = range.to();
i++;
}
- if (from < String::kMaxUC16CharCode) {
- negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
+ if (from < String::kMaxUtf16CodeUnit) {
+ negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit));
}
}
@@ -4797,7 +4797,7 @@ void DispatchTable::AddRange(CharacterRange full_range, int value) {
entry->AddValue(value);
// Bail out if the last interval ended at 0xFFFF since otherwise
// adding 1 will wrap around to 0.
- if (entry->to() == String::kMaxUC16CharCode)
+ if (entry->to() == String::kMaxUtf16CodeUnit)
break;
ASSERT(entry->to() + 1 > current.from());
current.set_from(entry->to() + 1);
@@ -5117,7 +5117,7 @@ int TextNode::ComputeFirstCharacterSet(int budget) {
int new_length = length + 1;
if (length > 0) {
if (ranges->at(0).from() == 0) new_length--;
- if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
+ if (ranges->at(length - 1).to() == String::kMaxUtf16CodeUnit) {
new_length--;
}
}
@@ -5207,14 +5207,14 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
if (last < range.from())
AddRange(CharacterRange(last, range.from() - 1));
if (range.to() >= last) {
- if (range.to() == String::kMaxUC16CharCode) {
+ if (range.to() == String::kMaxUtf16CodeUnit) {
return;
} else {
last = range.to() + 1;
}
}
}
- AddRange(CharacterRange(last, String::kMaxUC16CharCode));
+ AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
}
diff --git a/deps/v8/src/lazy-instance.h b/deps/v8/src/lazy-instance.h
new file mode 100644
index 0000000000..09dfe2154d
--- /dev/null
+++ b/deps/v8/src/lazy-instance.h
@@ -0,0 +1,216 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed. This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety. The Type constructor will only ever
+// be called once, even if two threads are racing to create the object. Get()
+// and Pointer() will always return the same, completely initialized instance.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor. It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property. You can have multiple LazyInstance's of the same type, and each
+// will manage a unique instance. It also preallocates the space for Type, so
+// as to avoid allocating the Type instance on the heap. This may improve the
+// performance of creating the instance and reduce heap fragmentation. This
+// requires that Type be a complete type so we can determine the size. See
+// notes for advanced users below for more explanations.
+//
+// Example usage:
+// static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
+// void SomeMethod() {
+// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
+//
+// MyClass* ptr = my_instance.Pointer();
+// ptr->DoDoDo(); // MyClass::DoDoDo
+// }
+//
+// Additionally you can override the way your instance is constructed by
+// providing your own trait:
+// Example usage:
+// struct MyCreateTrait {
+// static void Construct(MyClass* allocated_ptr) {
+// new (allocated_ptr) MyClass(/* extra parameters... */);
+// }
+// };
+// static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
+// LAZY_INSTANCE_INITIALIZER;
+//
+// Notes for advanced users:
+// LazyInstance can actually be used in two different ways:
+//
+// - "Static mode" which is the default mode since it is the most efficient
+// (no extra heap allocation). In this mode, the instance is statically
+// allocated (stored in the global data section at compile time).
+// The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
+// must be used to initialize static lazy instances.
+//
+// - "Dynamic mode". In this mode, the instance is dynamically allocated and
+// constructed (using new) by default. This mode is useful if you have to
+// deal with some code already allocating the instance for you (e.g.
+// OS::CreateMutex(), which returns a new private OS-dependent subclass of
+// Mutex).
+// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
+// dynamic lazy instances.
+
+#ifndef V8_LAZY_INSTANCE_H_
+#define V8_LAZY_INSTANCE_H_
+
+#include "once.h"
+
+namespace v8 {
+namespace internal {
+
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
+#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
+
+// Default to static mode.
+#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+template <typename T>
+struct LeakyInstanceTrait {
+ static void Destroy(T* /* instance */) {}
+};
+
+
+// Traits that define how an instance is allocated and accessed.
+
+template <typename T>
+struct StaticallyAllocatedInstanceTrait {
+ typedef char StorageType[sizeof(T)];
+
+ static T* MutableInstance(StorageType* storage) {
+ return reinterpret_cast<T*>(storage);
+ }
+
+ template <typename ConstructTrait>
+ static void InitStorageUsingTrait(StorageType* storage) {
+ ConstructTrait::Construct(MutableInstance(storage));
+ }
+};
+
+
+template <typename T>
+struct DynamicallyAllocatedInstanceTrait {
+ typedef T* StorageType;
+
+ static T* MutableInstance(StorageType* storage) {
+ return *storage;
+ }
+
+ template <typename CreateTrait>
+ static void InitStorageUsingTrait(StorageType* storage) {
+ *storage = CreateTrait::Create();
+ }
+};
+
+
+template <typename T>
+struct DefaultConstructTrait {
+ // Constructs the provided object which was already allocated.
+ static void Construct(T* allocated_ptr) {
+ new(allocated_ptr) T();
+ }
+};
+
+
+template <typename T>
+struct DefaultCreateTrait {
+ static T* Create() {
+ return new T();
+ }
+};
+
+
+// TODO(pliard): Handle instances destruction (using global destructors).
+template <typename T, typename AllocationTrait, typename CreateTrait,
+ typename DestroyTrait /* not used yet. */ >
+struct LazyInstanceImpl {
+ public:
+ typedef typename AllocationTrait::StorageType StorageType;
+
+ private:
+ static void InitInstance(StorageType* storage) {
+ AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+ }
+
+ void Init() const {
+ CallOnce(&once_, &InitInstance, &storage_);
+ }
+
+ public:
+ T* Pointer() {
+ Init();
+ return AllocationTrait::MutableInstance(&storage_);
+ }
+
+ const T& Get() const {
+ Init();
+ return *AllocationTrait::MutableInstance(&storage_);
+ }
+
+ mutable OnceType once_;
+ // Note that the previous field, OnceType, is an AtomicWord which guarantees
+ // the correct alignment of the storage field below.
+ mutable StorageType storage_;
+};
+
+
+template <typename T,
+ typename CreateTrait = DefaultConstructTrait<T>,
+ typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyStaticInstance {
+ typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>, CreateTrait,
+ DestroyTrait> type;
+};
+
+
+template <typename T,
+ typename CreateTrait = DefaultConstructTrait<T>,
+ typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyInstance {
+ // A LazyInstance is a LazyStaticInstance.
+ typedef typename LazyStaticInstance<T, CreateTrait, DestroyTrait>::type type;
+};
+
+
+template <typename T,
+ typename CreateTrait = DefaultConstructTrait<T>,
+ typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyDynamicInstance {
+ typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>, CreateTrait,
+ DestroyTrait> type;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_LAZY_INSTANCE_H_
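
A hypothetical use of the new header in its default static mode; the Counters struct and RecordCall function are illustrative only:

    #include "lazy-instance.h"

    struct Counters { int calls; };

    static v8::internal::LazyInstance<Counters>::type counters =
        LAZY_INSTANCE_INITIALIZER;

    void RecordCall() {
      counters.Pointer()->calls++;  // first access runs the Construct trait,
                                    // exactly once even under races
    }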
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 83805dc729..4396c7354c 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -46,29 +46,6 @@
namespace v8 {
namespace internal {
-
-#define DEFINE_OPERAND_CACHE(name, type) \
- name name::cache[name::kNumCachedOperands]; \
- void name::SetUpCache() { \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- } \
- static bool name##_initialize() { \
- name::SetUpCache(); \
- return true; \
- } \
- static bool name##_cache_initialized = name##_initialize();
-
-DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
-DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
-DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
-DEFINE_OPERAND_CACHE(LRegister, REGISTER)
-DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER)
-
-#undef DEFINE_OPERAND_CACHE
-
-
static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
return a.Value() < b.Value() ? a : b;
}
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 5a44fcec99..aefd8b6492 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -94,6 +94,31 @@ void LOperand::PrintTo(StringStream* stream) {
}
}
+#define DEFINE_OPERAND_CACHE(name, type) \
+ name* name::cache = NULL; \
+ void name::SetUpCache() { \
+ if (cache) return; \
+ cache = new name[kNumCachedOperands]; \
+ for (int i = 0; i < kNumCachedOperands; i++) { \
+ cache[i].ConvertTo(type, i); \
+ } \
+ } \
+
+DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
+DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
+DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
+DEFINE_OPERAND_CACHE(LRegister, REGISTER)
+DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER)
+
+#undef DEFINE_OPERAND_CACHE
+
+void LOperand::SetUpCaches() {
+ LConstantOperand::SetUpCache();
+ LStackSlot::SetUpCache();
+ LDoubleStackSlot::SetUpCache();
+ LRegister::SetUpCache();
+ LDoubleRegister::SetUpCache();
+}
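With the static initializers gone, the caches must now be built explicitly
before any cached operand is created. A sketch of the intended call order (the
actual call site lives in V8's one-time initialization, outside this hunk):

    LOperand::SetUpCaches();                             // idempotent per subclass
    LConstantOperand* op = LConstantOperand::Create(3);  // now hits the cache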
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index ec7269509f..d1e2e3cdef 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -69,6 +69,10 @@ class LOperand: public ZoneObject {
ASSERT(this->index() == index);
}
+ // Calls SetUpCache() for each subclass. Don't forget to update this method
+ // if you add a new LOperand subclass.
+ static void SetUpCaches();
+
protected:
static const int kKindFieldWidth = 3;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
@@ -264,7 +268,7 @@ class LConstantOperand: public LOperand {
private:
static const int kNumCachedOperands = 128;
- static LConstantOperand cache[];
+ static LConstantOperand* cache;
LConstantOperand() : LOperand() { }
explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
@@ -299,7 +303,7 @@ class LStackSlot: public LOperand {
private:
static const int kNumCachedOperands = 128;
- static LStackSlot cache[];
+ static LStackSlot* cache;
LStackSlot() : LOperand() { }
explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
@@ -323,7 +327,7 @@ class LDoubleStackSlot: public LOperand {
private:
static const int kNumCachedOperands = 128;
- static LDoubleStackSlot cache[];
+ static LDoubleStackSlot* cache;
LDoubleStackSlot() : LOperand() { }
explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
@@ -347,7 +351,7 @@ class LRegister: public LOperand {
private:
static const int kNumCachedOperands = 16;
- static LRegister cache[];
+ static LRegister* cache;
LRegister() : LOperand() { }
explicit LRegister(int index) : LOperand(REGISTER, index) { }
@@ -371,7 +375,7 @@ class LDoubleRegister: public LOperand {
private:
static const int kNumCachedOperands = 16;
- static LDoubleRegister cache[];
+ static LDoubleRegister* cache;
LDoubleRegister() : LOperand() { }
explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 3979719071..21d64df21c 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -35,6 +35,7 @@
#include "global-handles.h"
#include "log.h"
#include "macro-assembler.h"
+#include "platform.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "string-stream.h"
@@ -461,18 +462,20 @@ class Logger::NameBuffer {
utf8_pos_ += utf8_length;
return;
}
- int uc16_length = Min(str->length(), kUc16BufferSize);
- String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
+ int uc16_length = Min(str->length(), kUtf16BufferSize);
+ String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
+ int previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
- uc16 c = uc16_buffer_[i];
+ uc16 c = utf16_buffer[i];
if (c <= String::kMaxAsciiCharCodeU) {
utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
} else {
- int char_length = unibrow::Utf8::Length(c);
+ int char_length = unibrow::Utf8::Length(c, previous);
if (utf8_pos_ + char_length > kUtf8BufferSize) break;
- unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
+ unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
utf8_pos_ += char_length;
}
+ previous = c;
}
}
@@ -504,11 +507,11 @@ class Logger::NameBuffer {
private:
static const int kUtf8BufferSize = 512;
- static const int kUc16BufferSize = 128;
+ static const int kUtf16BufferSize = 128;
int utf8_pos_;
char utf8_buffer_[kUtf8BufferSize];
- uc16 uc16_buffer_[kUc16BufferSize];
+ uc16 utf16_buffer[kUtf16BufferSize];
};
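The 'previous' value threaded through the loop above lets the UTF-8 encoder
handle surrogate pairs: encoding each UTF-16 unit in isolation would emit two
3-byte sequences for a pair, while knowing the previous unit lets the encoder
combine them into one 4-byte sequence. A worked example:

    // U+1F600 is the UTF-16 pair 0xD83D 0xDE00.
    // without 'previous': 3 bytes for 0xD83D + 3 bytes for 0xDE00 (CESU-8 style)
    // with 'previous':    the pair becomes the single sequence F0 9F 98 80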
@@ -1726,13 +1729,14 @@ void Logger::EnableSlidingStateWindow() {
}
}
+// Protects the state below.
+static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;
-Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(active_samplers_mutex.Pointer());
for (int i = 0;
ActiveSamplersExist() && i < active_samplers_->length();
++i) {
@@ -1759,7 +1763,7 @@ SamplerRegistry::State SamplerRegistry::GetState() {
void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
ASSERT(sampler->IsActive());
- ScopedLock lock(mutex_);
+ ScopedLock lock(active_samplers_mutex.Pointer());
if (active_samplers_ == NULL) {
active_samplers_ = new List<Sampler*>;
} else {
@@ -1771,7 +1775,7 @@ void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
ASSERT(sampler->IsActive());
- ScopedLock lock(mutex_);
+ ScopedLock lock(active_samplers_mutex.Pointer());
ASSERT(active_samplers_ != NULL);
bool removed = active_samplers_->RemoveElement(sampler);
ASSERT(removed);
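The LazyMutex replacing SamplerRegistry::mutex_ is an application of the
lazy-instance machinery above: a POD global initialized with
LAZY_MUTEX_INITIALIZER, with the OS mutex created on first use. The pattern,
as a sketch (the guarded state is hypothetical):

    static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;  // no static constructor

    void TouchSharedState() {
      ScopedLock lock(my_mutex.Pointer());  // first call creates the mutex
      // ... mutate the guarded state ...
    }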
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index e54f0413e9..129738757e 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -454,7 +454,6 @@ class SamplerRegistry : public AllStatic {
return active_samplers_ != NULL && !active_samplers_->is_empty();
}
- static Mutex* mutex_; // Protects the state below.
static List<Sampler*>* active_samplers_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
index c9ed66f6bf..43f6b8986f 100644
--- a/deps/v8/src/mark-compact-inl.h
+++ b/deps/v8/src/mark-compact-inl.h
@@ -52,13 +52,6 @@ void MarkCompactCollector::SetFlags(int flags) {
}
-void MarkCompactCollector::ClearCacheOnMap(Map* map) {
- if (FLAG_cleanup_code_caches_at_gc) {
- map->ClearCodeCache(heap());
- }
-}
-
-
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
@@ -88,7 +81,7 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
if (obj->IsMap()) {
- ClearCacheOnMap(Map::cast(obj));
+ heap_->ClearCacheOnMap(Map::cast(obj));
}
}
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 17be2234c7..dde172d2f0 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -1049,7 +1049,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
&& (target->ic_state() == MEGAMORPHIC ||
- heap->mark_compact_collector()->flush_monomorphic_ics_)) {
+ heap->mark_compact_collector()->flush_monomorphic_ics_ ||
+ target->ic_age() != heap->global_ic_age())) {
IC::Clear(rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
@@ -1797,7 +1798,7 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
- ClearCacheOnMap(map);
+ heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
@@ -3427,7 +3428,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
- p->ClearEvacuationCandidate();
p->ResetLiveBytes();
space->ReleasePage(p);
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 442ad1d98c..66ffd19535 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -638,9 +638,6 @@ class MarkCompactCollector {
// Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
- // Clears the cache of ICs related to this map.
- INLINE(void ClearCacheOnMap(Map* map));
-
void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index f9e75face8..2ff4710eff 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -117,13 +117,31 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(pc_);
+ ASSERT(IsCodeTarget(rmode_) ||
+ rmode_ == RUNTIME_ENTRY ||
+ rmode_ == EMBEDDED_OBJECT ||
+ rmode_ == EXTERNAL_REFERENCE);
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+ // that is the address of the instruction that follows the J/JAL/JR/JALR
+ // instruction.
+ return reinterpret_cast<Address>(
+ pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return Assembler::kSpecialTargetSize;
}
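For reference, the embedded constant the comment above describes is the usual
MIPS two-instruction load (a sketch; kInstructionsFor32BitConstant is 2 on
this port, per the "split across two consecutive instructions" comment later
in this patch):

    lui  at, (target >> 16)          ; <- pc_, start of the embedded constant
    ori  at, at, (target & 0xffff)
    ; target_address_address() == pc_ + 2 * kInstrSize, just past the pair,
    ; so the serializer emits both instructions as raw bytes before patching.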
@@ -281,7 +299,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -307,7 +325,7 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 9f803d9c1f..0d7f9218b4 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -850,7 +850,6 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
return rmode != RelocInfo::NONE;
}
-
void Assembler::GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
@@ -1319,7 +1318,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1329,7 +1328,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1604,7 +1603,7 @@ void Assembler::clz(Register rd, Register rs) {
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1612,7 +1611,7 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
@@ -1772,25 +1771,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -1831,7 +1830,7 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -1847,7 +1846,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- ASSERT(mips32r2);
+ ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index b1ffc45c0a..8b877f653b 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -553,10 +553,13 @@ class Assembler : public AssemblerBase {
static void JumpLabelToJumpRegister(Address pc);
// This sets the branch destination (which gets loaded at the call address).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
+ // This is for calls and branches within generated code. The serializer
+ // has already deserialized the lui/ori instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
+ set_target_address_at(
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ target);
}
// This sets the branch destination.
@@ -578,8 +581,7 @@ class Assembler : public AssemblerBase {
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
- static const int kCallTargetSize = 0 * kInstrSize;
- static const int kExternalTargetSize = 0 * kInstrSize;
+ static const int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit constant.
// Before jump-optimizations, this constant was used in
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 09a9924387..eeb84c3a94 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -67,9 +67,11 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToExternalReference expects a0 to contain the number of arguments
+ // JumpToExternalReference expects s0 to contain the number of arguments
// including the receiver and the extra arguments.
- __ Addu(a0, a0, Operand(num_extra_args + 1));
+ __ Addu(s0, a0, num_extra_args + 1);
+ __ sll(s1, s0, kPointerSizeLog2);
+ __ Subu(s1, s1, kPointerSize);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -1095,8 +1097,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(a1, a2);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 3eaa524918..1b3242cf09 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -70,13 +70,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in a0.
Label check_heap_number, call_builtin;
__ JumpIfNotSmi(a0, &check_heap_number);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
__ bind(&check_heap_number);
EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
__ bind(&call_builtin);
__ push(a0);
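The reordering above is the pattern applied throughout this file: jr ra has a
branch delay slot on MIPS, and plain Ret() fills it with a nop. Passing
USE_DELAY_SLOT lets the next emitted instruction ride in the slot instead:

    __ mov(v0, a0);          // old: set the result...
    __ Ret();                // ...then jr ra plus a wasted nop

    __ Ret(USE_DELAY_SLOT);  // new: jr ra...
    __ mov(v0, a0);          // ...result set in the delay slot, one insn saved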
@@ -128,9 +128,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// found in the shared function info object.
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
__ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
// Return result. The argument function info has been popped already.
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
__ Ret();
// Create a new closure through the slower runtime call.
@@ -179,8 +179,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Remove the on-stack argument and return.
__ mov(cp, v0);
- __ Pop();
- __ Ret();
+ __ DropAndRet(1);
// Need to collect. Call into runtime system.
__ bind(&gc);
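DropAndRet(n), used in the hunk above and repeatedly below, is the companion
shorthand: it replaces the explicit sp bump plus Ret(), presumably folding the
addiu into the return's delay slot. A sketch of the equivalence:

    __ Addu(sp, sp, Operand(n * kPointerSize));  // old
    __ Ret();

    __ DropAndRet(n);                            // new, one instruction shorter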
@@ -242,8 +241,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Remove the on-stack argument and return.
__ mov(cp, v0);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
// Need to collect. Call into runtime system.
__ bind(&gc);
@@ -368,8 +366,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
// Return and remove the on-stack parameters.
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
@@ -405,16 +402,14 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
// Allocate the JS object and copy header together with all in-object
// properties from the boilerplate.
- __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
+ __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
for (int i = 0; i < size; i += kPointerSize) {
__ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(a0, i));
+ __ sw(a1, FieldMemOperand(v0, i));
}
// Return and remove the on-stack parameters.
- __ Drop(4);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
+ __ DropAndRet(4);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
@@ -478,7 +473,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ And(exponent, source_, Operand(HeapNumber::kSignMask));
// Subtract from 0 if source was negative.
__ subu(at, zero_reg, source_);
- __ movn(source_, at, exponent);
+ __ Movn(source_, at, exponent);
// We have -1, 0 or 1, which we treat specially. Register source_ contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -490,15 +485,15 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
// Safe to use 'at' as dest reg here.
__ Or(at, exponent, Operand(exponent_word_for_1));
- __ movn(exponent, at, source_); // Write exp when source not 0.
+ __ Movn(exponent, at, source_); // Write exp when source not 0.
// 1, 0 and -1 all have 0 for the second word.
+ __ Ret(USE_DELAY_SLOT);
__ mov(mantissa, zero_reg);
- __ Ret();
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
- __ clz(zeros_, source_);
+ __ Clz(zeros_, source_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here.
__ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
@@ -514,9 +509,9 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
// And the top (top 20 bits).
__ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
- __ or_(exponent, exponent, source_);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ or_(exponent, exponent, source_);
}
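The movn -> Movn renames here (and movz/movt/movf/clz elsewhere in this file)
route these instructions through MacroAssembler wrappers. The motivation is
the Loongson support added in this patch: Loongson cores lack the conditional
move instructions, so the wrapper can branch instead. A sketch of the assumed
wrapper shape:

    void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
      if (kArchVariant == kLoongson) {
        Label done;
        Branch(&done, eq, rt, Operand(zero_reg));  // rt == 0: skip the move
        mov(rd, rs);
        bind(&done);
      } else {
        movn(rd, rs, rt);  // single conditional move on mips32r1/r2
      }
    }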
@@ -721,7 +716,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ clz(dst1, int_scratch);
+ __ Clz(dst1, int_scratch);
__ li(scratch2, 31);
__ Subu(dst1, scratch2, dst1);
@@ -1025,9 +1020,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
}
// Place heap_number_result in v0 and return to the pushed return address.
- __ mov(v0, heap_number_result);
__ pop(ra);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
}
@@ -1079,7 +1074,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
__ or_(scratch_, scratch_, sign_);
// Subtract from 0 if the value was negative.
__ subu(at, zero_reg, the_int_);
- __ movn(the_int_, at, sign_);
+ __ Movn(the_int_, at, sign_);
// We should be masking the implict first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
@@ -1163,6 +1158,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ bind(&return_equal);
+
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == greater) {
@@ -1234,8 +1230,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal (lhs is already not zero).
+ __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ mov(v0, lhs);
- __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -1273,8 +1269,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal.
+ __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ li(v0, Operand(1));
- __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -1354,12 +1350,13 @@ void EmitNanCheck(MacroAssembler* masm, Condition cc) {
__ bind(&one_is_nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
+
if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
}
- __ Ret(); // Return.
+ __ Ret();
__ bind(&neither_is_nan);
}
@@ -1408,6 +1405,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
__ bind(&return_result_equal);
+
__ li(v0, Operand(EQUAL));
__ Ret();
}
@@ -1439,6 +1437,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ BranchF(&less_than, NULL, lt, f12, f14);
// Not equal, not less, not NaN, must be greater.
+
__ li(v0, Operand(GREATER));
__ Ret();
@@ -1469,8 +1468,8 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Return non-zero.
Label return_not_equal;
__ bind(&return_not_equal);
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1));
- __ Ret();
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
@@ -1549,8 +1548,8 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
- __ Ret();
__ bind(&object_test);
__ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
@@ -1565,8 +1564,8 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
__ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
__ and_(a0, a2, a3);
__ And(a0, a0, Operand(1 << Map::kIsUndetectable));
- __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ xori(v0, a0, 1 << Map::kIsUndetectable);
}
@@ -1673,8 +1672,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
- __ Addu(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
+ __ DropAndRet(1);
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
@@ -1696,8 +1694,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(a2, &not_two_smis);
__ sra(a1, a1, 1);
__ sra(a0, a0, 1);
- __ Subu(v0, a1, a0);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
__ bind(&not_two_smis);
} else if (FLAG_debug_code) {
__ Or(a2, a1, a0);
@@ -1750,15 +1748,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
__ c(OLT, D, f12, f14);
- __ movt(v0, t0);
+ __ Movt(v0, t0);
// Use the previous check to conditionally store the opposite condition
// (GREATER) in v0. If rhs is equal to lhs, this will be corrected by the
// next check.
- __ movf(v0, t1);
+ __ Movf(v0, t1);
// Check if EQUAL condition is satisfied. If true, move conditionally
// result to v0.
__ c(EQ, D, f12, f14);
- __ movt(v0, t2);
+ __ Movt(v0, t2);
__ Ret();
@@ -1899,7 +1897,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
// Undetectable -> false.
- __ movn(tos_, zero_reg, at);
+ __ Movn(tos_, zero_reg, at);
__ Ret(ne, at, Operand(zero_reg));
}
}
@@ -1916,8 +1914,8 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
Label skip;
__ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
__ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- __ Ret(); // the string length is OK as the return value
__ bind(&skip);
}
@@ -1955,7 +1953,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
// The value of a root is never NULL, so we can avoid loading a non-null
// value into tos_ when we want to return 'true'.
if (!result) {
- __ movz(tos_, zero_reg, at);
+ __ Movz(tos_, zero_reg, at);
}
__ Ret(eq, at, Operand(zero_reg));
}
@@ -2092,8 +2090,8 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
__ Branch(slow, eq, t0, Operand(zero_reg));
// Return '0 - value'.
- __ Subu(v0, zero_reg, a0);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, zero_reg, a0);
}
@@ -2423,8 +2421,8 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
// Negating it results in 'lt'.
__ Branch(&skip, lt, scratch2, Operand(zero_reg));
ASSERT(Smi::FromInt(0) == 0);
- __ mov(v0, zero_reg);
- __ Ret(); // Return smi 0 if the non-zero one was positive.
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
__ bind(&skip);
// We fall through here if we multiplied a negative number by 0, because
// that would mean we should produce -0.
@@ -2479,23 +2477,23 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
}
break;
case Token::BIT_OR:
- __ Or(v0, left, Operand(right));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ or_(v0, left, right);
break;
case Token::BIT_AND:
- __ And(v0, left, Operand(right));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ and_(v0, left, right);
break;
case Token::BIT_XOR:
- __ Xor(v0, left, Operand(right));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ xor_(v0, left, right);
break;
case Token::SAR:
// Remove tags from right operand.
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ srav(scratch1, left, scratch1);
// Smi tag result.
- __ And(v0, scratch1, Operand(~kSmiTagMask));
+ __ And(v0, scratch1, ~kSmiTagMask);
__ Ret();
break;
case Token::SHR:
@@ -2607,8 +2605,8 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// kValueOffset. On MIPS this workaround is built into sdc1 so
// there's no point in generating even more instructions.
__ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
- __ Ret();
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
@@ -3482,8 +3480,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
__ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, cache_entry);
- __ Ret();
__ bind(&invalid_cache);
// The cache is invalid. Call runtime which will recreate the
@@ -3662,7 +3660,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
ne,
double_exponent,
double_scratch);
-
+ // double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
@@ -3682,7 +3680,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
ne,
double_exponent,
double_scratch);
-
+ // double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
@@ -3866,9 +3864,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ sw(a1, MemOperand(a0));
}
- // Prepare arguments for C routine: a0 = argc, a1 = argv
+ // Prepare arguments for C routine.
+ // a0 = argc
__ mov(a0, s0);
- __ mov(a1, s1);
+ // a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
@@ -3888,30 +3887,28 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// coverage code can interfere with the proper calculation of ra.
Label find_ra;
masm->bal(&find_ra); // bal exposes branch delay slot.
- masm->nop(); // Branch delay slot nop.
+ masm->mov(a1, s1);
masm->bind(&find_ra);
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
- const int kNumInstructionsToJump = 6;
+ const int kNumInstructionsToJump = 5;
masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
- masm->Subu(sp, sp, kCArgsSlotsSize);
+ // Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
masm->jalr(t9);
- masm->nop(); // Branch delay slot nop.
+ // Set up sp in the delay slot.
+ masm->addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
ASSERT_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
- // Restore stack (remove arg slots).
- __ Addu(sp, sp, kCArgsSlotsSize);
-
if (always_allocate) {
// It's okay to clobber a2 and a3 here. v0 & v1 contain result.
__ li(a2, Operand(scope_depth));
@@ -3925,14 +3922,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
__ addiu(a2, v0, 1);
__ andi(t0, a2, kFailureTagMask);
- __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+ __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
+ // Restore stack (remove arg slots) in branch delay slot.
+ __ addiu(sp, sp, kCArgsSlotsSize);
+
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0);
- __ Ret();
+ __ LeaveExitFrame(save_doubles_, s0, true);
// Check if we should retry or throw exception.
Label retry;
@@ -3943,11 +3942,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(throw_out_of_memory_exception, eq,
- v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ Branch(USE_DELAY_SLOT,
+ throw_out_of_memory_exception,
+ eq,
+ v0,
+ Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ // If we throw the OOM exception, the value of a3 doesn't matter.
+ // Any instruction that is not a jump can go in the delay slot.
// Retrieve the pending exception and clear the variable.
- __ li(a3, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
@@ -3955,8 +3959,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ Branch(throw_termination_exception, eq,
- v0, Operand(isolate->factory()->termination_exception()));
+ __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
+ __ Branch(throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
__ jmp(throw_normal_exception);
@@ -3968,8 +3972,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
- // a0: number of arguments including receiver
- // a1: pointer to builtin function
+ // s0: number of arguments including receiver
+ // s1: size of arguments excluding receiver
+ // s2: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
@@ -3979,19 +3984,18 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// this by performing a garbage collection and retrying the
// builtin once.
+ // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+ // The reason for this is that these arguments would need to be saved anyway
+ // so it's faster to set them up directly.
+ // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
+
// Compute the argv pointer in a callee-saved register.
- __ sll(s1, a0, kPointerSizeLog2);
__ Addu(s1, sp, s1);
- __ Subu(s1, s1, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
- // Set up argc and the builtin function in callee-saved registers.
- __ mov(s0, a0);
- __ mov(s2, a1);
-
// s0: number of arguments (C callee-saved)
// s1: pointer to first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
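Callers of CEntryStub now load s0-s2 directly. A sketch of the helper pair
this patch adds to the MIPS macro assembler (signatures inferred from the
debug-mips.cc hunk later in this diff):

    __ PrepareCEntryArgs(0);  // s0 = argc, s1 = args size excluding receiver
    __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
    CEntryStub ceb(1);
    __ CallStub(&ceb);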
@@ -4083,6 +4087,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
}
+ __ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
// We build an EntryFrame.
@@ -4155,7 +4160,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ li(t1, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(t1, MemOperand(t0));
@@ -4199,7 +4204,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(t1);
- __ Branch(&non_outermost_js_2, ne, t1,
+ __ Branch(&non_outermost_js_2,
+ ne,
+ t1,
Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ sw(zero_reg, MemOperand(t1));
@@ -4364,8 +4371,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
// Null is not instance of anything.
- __ Branch(&object_not_null, ne, scratch,
- Operand(masm->isolate()->factory()->null_value()));
+ __ Branch(&object_not_null,
+ ne,
+ scratch,
+ Operand(masm->isolate()->factory()->null_value()));
__ li(v0, Operand(Smi::FromInt(1)));
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -4470,8 +4479,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Label runtime;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne,
- a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Branch(&runtime,
+ ne,
+ a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -4503,7 +4514,9 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label adaptor_frame, try_allocate;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a2,
+ __ Branch(&adaptor_frame,
+ eq,
+ a2,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
@@ -4693,8 +4706,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Branch(&arguments_loop, lt, t5, Operand(a2));
// Return and remove the on-stack parameters.
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
// a2 = argument count (tagged)
@@ -4799,8 +4811,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Return and remove the on-stack parameters.
__ bind(&done);
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
@@ -5008,7 +5019,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
- __ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+ __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
@@ -5116,14 +5127,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ Branch(&success, eq,
- v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Label failure;
- __ Branch(&failure, eq,
- v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
- __ Branch(&runtime, ne,
- v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
@@ -5149,8 +5157,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
__ li(v0, Operand(isolate->factory()->null_value()));
- __ Addu(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
+ __ DropAndRet(4);
// Process the result from the native regexp code.
__ bind(&success);
@@ -5211,14 +5218,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sll(a3, a3, kSmiTagSize); // Convert to Smi.
__ sw(a3, MemOperand(a0, 0));
__ Branch(&next_capture, USE_DELAY_SLOT);
- __ addiu(a0, a0, kPointerSize); // In branch delay slot.
+ __ addiu(a0, a0, kPointerSize); // In branch delay slot.
__ bind(&done);
// Return last match info.
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
- __ Addu(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
+ __ DropAndRet(4);
// External string. Short external strings have already been ruled out.
// a0: scratch
@@ -5330,8 +5336,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ addiu(a3, a3, kPointerSize); // In branch delay slot.
__ bind(&done);
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
__ bind(&slowcase);
__ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
@@ -5879,10 +5884,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
__ Or(c1, c1, scratch1);
__ bind(&tmp);
- __ Branch(not_found,
- Uless_equal,
- scratch,
- Operand(static_cast<int>('9' - '0')));
+ __ Branch(
+ not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
__ bind(&not_array_index);
// Calculate the two character string hash.
@@ -6037,7 +6040,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// if (hash == 0) hash = 27;
__ ori(at, zero_reg, StringHasher::kZeroHash);
- __ movz(hash, at, hash);
+ __ Movz(hash, at, hash);
}
@@ -6136,7 +6139,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ And(t0, a1, Operand(kIsIndirectStringMask));
__ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
-
+ // t0 is used as a scratch register and can be overwritten in either case.
__ And(t0, a1, Operand(kSlicedNotConsMask));
__ Branch(&sliced_string, ne, t0, Operand(zero_reg));
// Cons string. Check whether it is flat, then fetch first part.
@@ -6327,7 +6330,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Subu(scratch3, scratch1, Operand(scratch2));
Register length_delta = scratch3;
__ slt(scratch4, scratch2, scratch1);
- __ movn(scratch1, scratch2, scratch4);
+ __ Movn(scratch1, scratch2, scratch4);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
__ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
@@ -6409,8 +6412,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
__ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&not_same);
@@ -6485,7 +6487,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
__ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
__ mov(v0, a0); // Assume we'll return first string (from a0).
- __ movz(v0, a1, a2); // If first is empty, return second (from a1).
+ __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
__ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
__ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
__ and_(t4, t4, t5); // Branch if both strings were non-empty.
@@ -6553,8 +6555,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2,
- Operand(ConsString::kMinLength));
+ __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
@@ -6815,16 +6816,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall thru, and return.
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(GREATER)); // In delay slot.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
__ bind(&fpu_eq);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(EQUAL)); // In delay slot.
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
__ bind(&fpu_lt);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(LESS)); // In delay slot.
+ __ li(v0, Operand(LESS));
+ __ Ret();
}
__ bind(&unordered);
@@ -6895,6 +6896,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
Label miss;
+ bool equality = Token::IsEqualityOp(op_);
+
// Registers containing left and right operands respectively.
Register left = a1;
Register right = a0;
@@ -6922,41 +6925,52 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
Label left_ne_right;
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+ __ Branch(&left_ne_right, ne, left, Operand(right));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, zero_reg); // In the delay slot.
- __ Ret();
__ bind(&left_ne_right);
// Handle not identical strings.
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(tmp3, tmp1, Operand(tmp2));
- __ And(tmp5, tmp3, Operand(kIsSymbolMask));
- Label is_symbol;
- __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
- __ mov(v0, a0); // In the delay slot.
- // Make sure a0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(a0));
- __ Ret();
- __ bind(&is_symbol);
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ Label is_symbol;
+ __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In the delay slot.
+ __ bind(&is_symbol);
+ }
// Check that both strings are sequential ASCII.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
- &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ tmp1, tmp2, tmp3, tmp4, &runtime);
// Compare flat ASCII strings. Returns when done.
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ }
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -6975,8 +6989,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
ASSERT(GetCondition() == eq);
- __ Subu(v0, a0, Operand(a1));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a0, a1);
__ bind(&miss);
GenerateMiss(masm);
@@ -7009,8 +7023,9 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(ra);
__ Push(a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
- __ push(t0);
- __ CallExternalReference(miss, 3);
+ __ addiu(sp, sp, -kPointerSize);
+ __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+ __ sw(t0, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -7067,8 +7082,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
// Push return address (accessible to GC through exit frame pc).
// This spot for ra was reserved in EnterExitFrame.
masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
- masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET), true);
+ masm->li(ra,
+ Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ CONSTANT_SIZE);
// Call the function.
masm->Jump(t9);
// Make sure the stored 'ra' points to this position.
@@ -7320,17 +7337,17 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
+ __ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
- __ Ret();
}
__ bind(&in_dictionary);
+ __ Ret(USE_DELAY_SLOT);
__ li(result, 1);
- __ Ret();
__ bind(&not_in_dictionary);
+ __ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
- __ Ret();
}
@@ -7664,7 +7681,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
+ __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
&slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 8cbb771952..9acccdc2ca 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -37,8 +37,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-TranscendentalFunction CreateTranscendentalFunction(
- TranscendentalCache::Type type) {
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
@@ -50,6 +49,10 @@ TranscendentalFunction CreateTranscendentalFunction(
}
+UnaryMathFunction CreateSqrtFunction() {
+ return &sqrt;
+}
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index d62a8901f0..fd04722792 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,11 +39,20 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+enum ArchVariants {
+ kMips32r2,
+ kMips32r1,
+ kLoongson
+};
#ifdef _MIPS_ARCH_MIPS32R2
- #define mips32r2 1
+ static const ArchVariants kArchVariant = kMips32r2;
+#elif _MIPS_ARCH_LOONGSON
+// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
+// which predates (and is a subset of) the mips32r2 and r1 architectures.
+ static const ArchVariants kArchVariant = kLoongson;
#else
- #define mips32r2 0
+ static const ArchVariants kArchVariant = kMips32r1;
#endif
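Replacing the mips32r2 macro with a typed constant turns the old preprocessor
checks into ordinary compile-time-constant branches, the pattern used by the
assembler and disassembler hunks below:

    if (kArchVariant == kMips32r2) {
      // r2-only encodings: rotr/rotrv, ins/ext, the 64-bit FPU converts.
    } else {
      // emit an r1/Loongson-compatible sequence instead.
    }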
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 26b343c873..83f5f50172 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -152,8 +152,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(a0, zero_reg); // No arguments.
- __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+ __ PrepareCEntryArgs(0); // No arguments.
+ __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 611fbaaf96..51c2e46778 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -119,7 +119,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
const int kInstrSize = Assembler::kInstrSize;
// This structure comes from FullCodeGenerator::EmitStackCheck.
// The call of the stack guard check has the following form:
- // sltu at, sp, t0
+ // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count-based interrupts)
// beq at, zero_reg, ok
// lui t9, <stack guard address> upper
// ori t9, <stack guard address> lower
@@ -167,7 +167,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->sltu(at, sp, t0);
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->slt(at, a3, zero_reg);
+ } else {
+ patcher.masm()->sltu(at, sp, t0);
+ }
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index fde0c58f08..1d40c2c820 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -515,7 +515,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "cvt.w.d 'fd, 'fs");
break;
case CVT_L_D: {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "cvt.l.d 'fd, 'fs");
} else {
Unknown(instr);
@@ -526,7 +526,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "trunc.w.d 'fd, 'fs");
break;
case TRUNC_L_D: {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "trunc.l.d 'fd, 'fs");
} else {
Unknown(instr);
@@ -592,7 +592,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case L:
switch (instr->FunctionFieldRaw()) {
case CVT_D_L: {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "cvt.d.l 'fd, 'fs");
} else {
Unknown(instr);
@@ -600,7 +600,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
}
case CVT_S_L: {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "cvt.s.l 'fd, 'fs");
} else {
Unknown(instr);
@@ -636,7 +636,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
if (instr->RsValue() == 0) {
Format(instr, "srl 'rd, 'rt, 'sa");
} else {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "rotr 'rd, 'rt, 'sa");
} else {
Unknown(instr);
@@ -653,7 +653,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
if (instr->SaValue() == 0) {
Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "rotrv 'rd, 'rt, 'rs");
} else {
Unknown(instr);
@@ -770,7 +770,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
@@ -778,7 +778,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
}
case EXT: {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index c5ef2ccbf7..657bee8657 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -42,6 +42,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -119,8 +120,10 @@ class JumpPatchSite BASE_EMBEDDED {
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
- return 11 * Instruction::kInstrSize;
+ UNREACHABLE();
+ return 10 * Instruction::kInstrSize;
}
@@ -142,32 +145,11 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- // We can optionally optimize based on counters rather than statistical
- // sampling.
- if (info->ShouldSelfOptimize()) {
- if (FLAG_trace_opt_verbose) {
- PrintF("[adding self-optimization header to %s]\n",
- *info->function()->debug_name()->ToCString());
- }
- has_self_optimization_header_ = true;
- MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
- Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
- JSGlobalPropertyCell* cell;
- if (maybe_cell->To(&cell)) {
- __ li(a2, Handle<JSGlobalPropertyCell>(cell));
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- __ Subu(a3, a3, Operand(Smi::FromInt(1)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- Handle<Code> compile_stub(
- isolate()->builtins()->builtin(Builtins::kLazyRecompile));
- __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
- ASSERT_EQ(masm_->pc_offset(), self_optimization_header_size());
- }
- }
-
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -341,6 +323,34 @@ void FullCodeGenerator::ClearAccumulator() {
}
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ li(a2, Operand(profiling_counter_));
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ Subu(a3, a3, Operand(Smi::FromInt(delta)));
+ __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+ // Self-optimization is a one-off thing: if it fails, don't try again.
+ reset_value = Smi::kMaxValue;
+ }
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
+ __ li(a2, Operand(profiling_counter_));
+ __ li(a3, Operand(Smi::FromInt(reset_value)));
+ __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
@@ -351,16 +361,35 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ sltu(at, sp, t0);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
- StackCheckStub stub;
- __ CallStub(&stub);
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ sltu(at, sp, t0);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
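The weighted back-edge path above scales the counter decrement with the size of the loop body, clamped to [1, kMaxBackEdgeWeight]. A minimal standalone sketch of that computation, with std::min/std::max standing in for V8's Min/Max helpers:

    #include <algorithm>

    static const int kMaxBackEdgeWeight = 127;
    static const int kBackEdgeDistanceDivisor = 142;

    // Larger loop bodies burn more of the interrupt budget per iteration,
    // but never more than kMaxBackEdgeWeight and never less than 1.
    int BackEdgeWeight(int distance_in_bytes) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance_in_bytes / kBackEdgeDistanceDivisor));
    }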
@@ -383,6 +412,32 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else if (FLAG_weighted_back_edges) {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ __ push(v0);
+ if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+ __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(a2);
+ __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+ } else {
+ InterruptStub stub;
+ __ CallStub(&stub);
+ }
+ __ pop(v0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+ }
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
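The return-sequence hunk above treats every function exit as a synthetic back edge against the same counter. A rough model of the counter's life cycle, with the V8-specific pieces reduced to plain C++ (the budget value stands in for FLAG_interrupt_budget, an assumption for illustration):

    // Sketch only: models the JSGlobalPropertyCell-based profiling counter.
    struct ProfilingCounter {
      int budget;  // stands in for FLAG_interrupt_budget
      int value;

      explicit ProfilingCounter(int initial_budget)
          : budget(initial_budget), value(initial_budget) {}

      // EmitProfilingCounterDecrement: returns true when the runtime
      // (InterruptStub or a self-optimization request) should be entered.
      bool Decrement(int weight) {
        value -= weight;
        return value < 0;
      }

      // EmitProfilingCounterReset: refill the budget after servicing.
      void Reset() { value = budget; }
    };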
@@ -902,7 +957,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
@@ -1195,7 +1250,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, mode);
+ CallIC(ic, mode);
}
@@ -1251,7 +1306,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
+ __ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
} else { // LET || CONST_HARMONY
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
@@ -1279,7 +1334,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
break;
}
@@ -1343,7 +1398,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Uninitialized const bindings outside of harmony mode are unholed.
ASSERT(var->mode() == CONST);
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: Undefined if TheHole.
+ __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
context()->Plug(v0);
break;
@@ -1421,6 +1476,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ push(a1);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1456,6 +1521,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1482,7 +1548,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1505,27 +1571,29 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
case ObjectLiteral::Property::SETTER:
- // Duplicate receiver on stack.
- __ lw(a0, MemOperand(sp));
- __ push(a0);
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
- } else {
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
- VisitForStackValue(value);
- }
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ lw(a0, MemOperand(sp)); // Duplicate receiver.
+ __ push(a0);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ lw(a0, MemOperand(sp));
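Grouping accessors per key is what saves the extra runtime calls: a literal with both a getter and a setter for the same property now costs one kDefineOrRedefineAccessorProperty call instead of two. A host-side sketch of the same bookkeeping, using std::map as a hypothetical stand-in for V8's AccessorTable:

    #include <cstdio>
    #include <map>
    #include <string>

    struct Expression {};  // opaque stand-in for an AST node

    struct AccessorPair {
      Expression* getter = nullptr;
      Expression* setter = nullptr;
    };

    int main() {
      std::map<std::string, AccessorPair> table;
      Expression g, s;
      table["x"].getter = &g;  // GETTER case fills one slot...
      table["x"].setter = &s;  // ...SETTER for the same key fills the other.
      table["y"].getter = &g;  // A lone getter leaves its setter null.
      // One definition call per key, not one per accessor:
      for (const auto& entry : table) {
        std::printf("define %s getter=%p setter=%p\n", entry.first.c_str(),
                    static_cast<void*>(entry.second.getter),
                    static_cast<void*>(entry.second.setter));
      }
      return 0;
    }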
@@ -1753,7 +1821,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1762,7 +1830,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1790,7 +1858,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1873,7 +1941,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -1914,7 +1982,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1927,7 +1995,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
}
@@ -1945,7 +2013,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -2064,7 +2132,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2116,7 +2184,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2151,6 +2219,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+ ic_total_count_++;
+ __ Call(code, rmode, ast_id);
+}
+
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@@ -2168,7 +2244,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2201,7 +2277,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2600,7 +2676,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label entry, loop;
// The use of t2 to store the valueOf symbol assumes that it is not otherwise
// used in the loop below.
- __ li(t2, Operand(FACTORY->value_of_symbol()));
+ __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
@@ -2970,6 +3046,52 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done;
+ Register object = v0;
+ Register result = v0;
+ Register scratch0 = t5;
+ Register scratch1 = a1;
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ GetObjectType(object, scratch1, scratch1);
+ __ Assert(eq, "Trying to get date field from non-date.",
+ scratch1, Operand(JS_DATE_TYPE));
+#endif
+
+ if (index->value() == 0) {
+ __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ li(scratch1, Operand(stamp));
+ __ lw(scratch1, MemOperand(scratch1));
+ __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Branch(&runtime, ne, scratch1, Operand(scratch0));
+ __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch1);
+ __ li(a1, Operand(index));
+ __ Move(a0, object);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
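EmitDateField's fast path above only trusts a cached field while the object's stamp matches the global date-cache stamp; otherwise it calls out through get_date_field_function. A simplified model, where the struct layout and the kFirstUncachedField value are assumptions for illustration:

    #include <cstdint>

    static const int kFirstUncachedField = 7;  // assumed cutoff, mirrors JSDate

    struct DateCache { int64_t stamp; };

    struct DateObject {
      int64_t cache_stamp;   // JSDate::kCacheStampOffset analogue
      int64_t fields[10];    // slot 0 is the time value, then cached fields
    };

    // Placeholder for the C call through get_date_field_function.
    int64_t SlowGetField(DateObject* date, int index) {
      return date->fields[index];  // the real code recomputes and re-stamps
    }

    int64_t GetField(DateObject* date, const DateCache& cache, int index) {
      if (index == 0) return date->fields[0];  // time value: always valid
      if (index < kFirstUncachedField && date->cache_stamp == cache.stamp) {
        return date->fields[index];            // fast path: cache is current
      }
      return SlowGetField(date, index);        // runtime fallback
    }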
@@ -3769,7 +3891,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3925,7 +4047,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
@@ -4036,7 +4158,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4069,7 +4191,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4087,7 +4209,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4113,7 +4235,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic);
+ CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4291,7 +4413,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index b6f019f478..2c4da1a886 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -512,8 +512,8 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ Push(a3, a2);
// Call the entry.
- __ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ __ PrepareCEntryArgs(2);
+ __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -758,8 +758,6 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@@ -773,10 +771,12 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
+ __ CheckMap(scratch1,
+ scratch2,
+ Heap::kNonStrictArgumentsElementsMapRootIndex,
+ slow_case,
+ DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -788,7 +788,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
__ li(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, key, scratch3);
+ __ Mul(scratch3, key, scratch3);
__ Addu(scratch3, scratch3, Operand(kOffset));
__ Addu(scratch2, scratch1, scratch3);
@@ -801,7 +801,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// map in scratch1).
__ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ li(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, scratch2, scratch3);
+ __ Mul(scratch3, scratch2, scratch3);
__ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
__ Addu(scratch2, scratch1, scratch3);
return MemOperand(scratch2);
@@ -820,13 +820,15 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ __ CheckMap(backing_store,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ slow_case,
DONT_DO_SMI_CHECK);
__ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
__ li(scratch, Operand(kPointerSize >> 1));
- __ mul(scratch, key, scratch);
+ __ Mul(scratch, key, scratch);
__ Addu(scratch,
scratch,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -844,8 +846,8 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Label slow, notin;
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, mapped_location);
- __ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in a2.
MemOperand unmapped_location =
@@ -853,8 +855,8 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ lw(a2, unmapped_location);
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ Branch(&slow, eq, a2, Operand(a3));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
- __ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
}
@@ -1253,8 +1255,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&check_if_double_array, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ Branch(
+ &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
+
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ Addu(t0, key, Operand(Smi::FromInt(1)));
@@ -1262,8 +1265,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ Branch(&fast_object_without_map_check);
__ bind(&check_if_double_array);
- __ Branch(&slow, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
// Add 1 to key, and go to common element store code for doubles.
STATIC_ASSERT(kSmiTag == 0);
__ Addu(t0, key, Operand(Smi::FromInt(1)));
@@ -1285,8 +1287,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register scratch_value = t0;
Register address = t1;
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&fast_double_with_map_check, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ Branch(&fast_double_with_map_check,
+ ne,
+ elements_map,
+ Heap::kFixedArrayMapRootIndex);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
@@ -1323,8 +1327,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
- __ Branch(&slow, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index d0531ec71a..94e8979ba3 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -612,7 +612,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -635,13 +634,9 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&skip);
}
- if (cc == al) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- // TODO(plind): The Arm port is a little different here, due to their
- // DeOpt jump table, which is not used for Mips yet.
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- }
+ // TODO(plind): The Arm port is a little different here, due to their
+ // DeOpt jump table, which is not used for Mips yet.
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
}
@@ -1018,7 +1013,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Generate standard code.
__ li(at, constant);
- __ mul(result, left, at);
+ __ Mul(result, left, at);
}
}
@@ -1036,7 +1031,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ sra(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
} else {
- __ mul(result, left, right);
+ __ Mul(result, left, right);
}
if (bailout_on_minus_zero) {
@@ -1261,6 +1256,46 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
}
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->TempAt(0));
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(a0));
+ ASSERT(result.is(v0));
+ ASSERT(!scratch.is(scratch0()));
+ ASSERT(!scratch.is(object));
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ GetObjectType(object, scratch, scratch);
+ __ Assert(eq, "Trying to get date field from non-date.",
+ scratch, Operand(JS_DATE_TYPE));
+#endif
+
+ if (index->value() == 0) {
+ __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ li(scratch, Operand(stamp));
+ __ lw(scratch, MemOperand(scratch));
+ __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Branch(&runtime, ne, scratch, Operand(scratch0()));
+ __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ li(a1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -2042,7 +2077,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
- __ li(result, Operand(factory()->the_hole_value()), true);
+ __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
__ Branch(&done);
// The inlined call site cache did not match. Check null and string before
@@ -2097,7 +2132,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ bind(&before_push_delta);
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ li(temp, Operand(delta * kPointerSize), true);
+ __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
CallCodeGeneric(stub.GetCode(),
@@ -2624,8 +2659,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
- __ movn(result, fp, temp); // move only if temp is not equal to zero (ne)
- __ movz(result, scratch, temp); // move only if temp is equal to zero (eq)
+ __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
+ __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
}
@@ -2650,15 +2685,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
- ASSERT(receiver.is(a0)); // Used for parameter count.
- ASSERT(function.is(a1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(v0));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2699,6 +2729,17 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ lw(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(a0)); // Used for parameter count.
+ ASSERT(function.is(a1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(v0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 1c4e1da3fb..1e0c2160aa 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -1097,6 +1097,14 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), a1);
LOperand* receiver = UseFixed(instr->receiver(), a0);
@@ -1604,6 +1612,13 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
}
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), a0);
+ LDateField* result = new LDateField(object, FixedTemp(a1), instr->index());
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 2128ce3e9e..5a7bf4d941 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -177,8 +177,9 @@ class LCodeGen;
V(ForInPrepareMap) \
V(ForInCacheArray) \
V(CheckMapValue) \
- V(LoadFieldByIndex)
-
+ V(LoadFieldByIndex) \
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -467,6 +468,20 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
@@ -989,6 +1004,22 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
+class LDateField: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+ Smi* index() const { return index_; }
+
+ private:
+ Smi* index_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 77d03b5554..e93a4175b3 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -574,12 +574,22 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- mul(rd, rs, rt.rm());
+ if (kArchVariant == kLoongson) {
+ mult(rs, rt.rm());
+ mflo(rd);
+ } else {
+ mul(rd, rs, rt.rm());
+ }
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- mul(rd, rs, at);
+ if (kArchVariant == kLoongson) {
+ mult(rs, at);
+ mflo(rd);
+ } else {
+ mul(rd, rs, at);
+ }
}
}
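The kLoongson branch exists because that core lacks the three-operand mul; the classic two-instruction sequence routes the product through the HI/LO register pair instead. Functionally (ignoring HI, which mult also writes):

    #include <cstdint>

    // mult rs, rt places the 64-bit product in HI:LO; mflo rd takes the
    // low 32 bits, which is identical to mul's result for any operands.
    int32_t MulViaHiLo(int32_t rs, int32_t rt) {
      int64_t product = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
      return static_cast<int32_t>(static_cast<uint32_t>(product));  // mflo
    }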
@@ -734,7 +744,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
@@ -758,13 +768,12 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
-
//------------Pseudo-instructions-------------
-void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
ASSERT(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && !gen2instr) {
+ if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
@@ -776,7 +785,7 @@ void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
}
- } else if (MustUseReg(j.rmode_) || gen2instr) {
+ } else {
if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
@@ -922,7 +931,7 @@ void MacroAssembler::Ext(Register rt,
ASSERT(pos < 32);
ASSERT(pos + size < 33);
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
@@ -946,7 +955,7 @@ void MacroAssembler::Ins(Register rt,
ASSERT(pos + size <= 32);
ASSERT(size != 0);
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
ins_(rt, rs, pos, size);
} else {
ASSERT(!rt.is(t8) && !rs.is(t8));
@@ -1016,6 +1025,48 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ trunc_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ trunc_w_d(fd, fs);
+ }
+}
+
+void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ round_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ round_w_d(fd, fs);
+ }
+}
+
+
+void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ floor_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ floor_w_d(fd, fs);
+ }
+}
+
+
+void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ ceil_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ ceil_w_d(fd, fs);
+ }
+}
+
void MacroAssembler::Trunc_uw_d(FPURegister fd,
Register rs,
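All four rounding helpers above share one workaround: on the kLoongson variant an in-place conversion (fd == fs) clobbers the upper word of the source register pair, so it is saved in t8 and restored afterwards. The shape of the fix, modeled with plain variables (the clobber itself is simulated):

    #include <cstdint>

    struct DoubleReg { uint32_t lo; uint32_t hi; };  // an even/odd FPU pair

    // Stands in for trunc_w_d with fd == fs.
    void TruncWDInPlace(DoubleReg* r) {
      // The conversion result would be written to r->lo here; on the
      // affected core the high word is garbled as a side effect:
      r->hi ^= 0xffffffffu;  // simulated clobber
    }

    void SafeTruncWD(DoubleReg* r, bool quirky_core) {
      if (quirky_core) {
        uint32_t saved = r->hi;  // mfc1 t8, <odd half of fs>
        TruncWDInPlace(r);
        r->hi = saved;           // mtc1 t8, <odd half of fs>
      } else {
        TruncWDInPlace(r);       // the instruction is safe as-is
      }
    }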
@@ -1146,6 +1197,104 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
}
+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+ if (kArchVariant == kLoongson) {
+ Label done;
+ Branch(&done, ne, rt, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movz(rd, rs, rt);
+ }
+}
+
+
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+ if (kArchVariant == kLoongson) {
+ Label done;
+ Branch(&done, eq, rt, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movn(rd, rs, rt);
+ }
+}
+
+
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+ if (kArchVariant == kLoongson) {
+ // Tests an FP condition code and then conditionally moves rs to rd.
+ // We do not currently use any FPU cc bit other than bit 0.
+ ASSERT(cc == 0);
+ ASSERT(!(rs.is(t8) || rd.is(t8)));
+ Label done;
+ Register scratch = t8;
+ // For testing purposes we need to fetch the content of the FCSR register
+ // and then test its cc (floating point condition code) bit (for cc = 0,
+ // this is the 24th bit of the FCSR).
+ cfc1(scratch, FCSR);
+ // For the MIPS I, II and III architectures, the contents of scratch are
+ // UNPREDICTABLE for the instruction immediately following CFC1.
+ nop();
+ srl(scratch, scratch, 16);
+ andi(scratch, scratch, 0x0080);
+ Branch(&done, eq, scratch, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movt(rd, rs, cc);
+ }
+}
+
+
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+ if (kArchVariant == kLoongson) {
+ // Tests an FP condition code and then conditionally moves rs to rd.
+ // We do not currently use any FPU cc bit other than bit 0.
+ ASSERT(cc == 0);
+ ASSERT(!(rs.is(t8) || rd.is(t8)));
+ Label done;
+ Register scratch = t8;
+ // For testing purposes we need to fetch the content of the FCSR register
+ // and then test its cc (floating point condition code) bit (for cc = 0,
+ // this is the 24th bit of the FCSR).
+ cfc1(scratch, FCSR);
+ // For the MIPS I, II and III architectures, the contents of scratch are
+ // UNPREDICTABLE for the instruction immediately following CFC1.
+ nop();
+ srl(scratch, scratch, 16);
+ andi(scratch, scratch, 0x0080);
+ Branch(&done, ne, scratch, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movf(rd, rs, cc);
+ }
+}
+
+
+void MacroAssembler::Clz(Register rd, Register rs) {
+ if (kArchVariant == kLoongson) {
+ ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+ Register mask = t8;
+ Register scratch = t9;
+ Label loop, end;
+ mov(at, rs);
+ mov(rd, zero_reg);
+ lui(mask, 0x8000);
+ bind(&loop);
+ and_(scratch, at, mask);
+ Branch(&end, ne, scratch, Operand(zero_reg));
+ addiu(rd, rd, 1);
+ Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
+ srl(mask, mask, 1);
+ bind(&end);
+ } else {
+ clz(rd, rs);
+ }
+}
+
+
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32-bit signed integer range.
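The Clz fallback above walks a single-bit mask down from bit 31, counting until it hits a set bit. A portable equivalent for nonzero inputs:

    #include <cstdint>

    int CountLeadingZeros(uint32_t value) {
      int count = 0;  // mov rd, zero_reg
      for (uint32_t mask = 0x80000000u; mask != 0; mask >>= 1) {
        if (value & mask) break;  // and_ + Branch(&end, ne, ...)
        ++count;                  // addiu rd, rd, 1
      }
      return count;  // 32 for value == 0, matching clz
    }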
@@ -1236,8 +1385,8 @@ void MacroAssembler::ConvertToInt32(Register source,
subu(scratch2, zero_reg, scratch);
// Trick to check sign bit (msb) held in dest, count leading zero.
// 0 indicates negative, save negative version with conditional move.
- clz(dest, dest);
- movz(scratch, scratch2, dest);
+ Clz(dest, dest);
+ Movz(scratch, scratch2, dest);
mov(dest, scratch);
}
bind(&done);
@@ -1268,16 +1417,16 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- round_w_d(result, double_input);
+ Round_w_d(result, double_input);
break;
case kRoundToZero:
- trunc_w_d(result, double_input);
+ Trunc_w_d(result, double_input);
break;
case kRoundToPlusInf:
- ceil_w_d(result, double_input);
+ Ceil_w_d(result, double_input);
break;
case kRoundToMinusInf:
- floor_w_d(result, double_input);
+ Floor_w_d(result, double_input);
break;
} // End of switch-statement.
@@ -1304,7 +1453,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
// Check for Infinity and NaNs, which should return 0.
Subu(scratch, result, HeapNumber::kExponentMask);
- movz(result, zero_reg, scratch);
+ Movz(result, zero_reg, scratch);
Branch(&done, eq, scratch, Operand(zero_reg));
// Express exponent as delta to (number of mantissa bits + 31).
@@ -1368,7 +1517,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
result = sign;
sign = no_reg;
Subu(result, zero_reg, input_high);
- movz(result, input_high, scratch);
+ Movz(result, input_high, scratch);
bind(&done);
}
@@ -1497,6 +1646,16 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
}
+void MacroAssembler::Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot) {
+ LoadRoot(at, index);
+ Branch(L, cond, rs, Operand(at), bdslot);
+}
+
+
void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
b(offset);
@@ -2288,8 +2447,15 @@ void MacroAssembler::Jump(intptr_t target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+ }
+ // The first instruction of 'li' may be placed in the delay slot.
+ // This is not an issue: t9 is expected to be clobbered anyway.
li(t9, Operand(target, rmode));
- Jump(t9, cond, rs, rt, bd);
+ Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ bind(&skip);
}
@@ -2384,7 +2550,7 @@ void MacroAssembler::Call(Address target,
// Must record previous source positions before the
// li() generates a new code target.
positions_recorder()->WriteRecordedPositions();
- li(t9, Operand(target_int, rmode), true);
+ li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
@@ -2419,7 +2585,7 @@ void MacroAssembler::Call(Handle<Code> code,
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -2489,14 +2655,16 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
nop();
}
+void MacroAssembler::DropAndRet(int drop) {
+ Ret(USE_DELAY_SLOT);
+ addiu(sp, sp, drop * kPointerSize);
+}
void MacroAssembler::DropAndRet(int drop,
Condition cond,
Register r1,
const Operand& r2) {
- // This is a workaround to make sure only one branch instruction is
- // generated. It relies on Drop and Ret not creating branches if
- // cond == cc_always.
+ // Both Drop and Ret need to be conditional.
Label skip;
if (cond != cc_always) {
Branch(&skip, NegateCondition(cond), r1, r2);
@@ -2563,8 +2731,8 @@ void MacroAssembler::Push(Handle<Object> handle) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- mov(a0, zero_reg);
- li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ PrepareCEntryArgs(0);
+ PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
@@ -2593,7 +2761,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- li(t1, Operand(CodeObject()));
+ li(t1, Operand(CodeObject()), CONSTANT_SIZE);
li(t2, Operand(state));
// Push the frame pointer, context, state, and code object.
@@ -3222,7 +3390,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
- isolate()->factory()->heap_number_map(),
+ Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
@@ -3726,10 +3894,13 @@ void MacroAssembler::GetObjectType(Register object,
// -----------------------------------------------------------------------------
// Runtime calls.
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2) {
+void MacroAssembler::CallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
}
@@ -3812,8 +3983,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0);
- Ret();
+ LeaveExitFrame(false, s0, true);
bind(&promote_scheduled_exception);
TailCallExternalReference(
@@ -4011,8 +4181,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- li(a0, num_arguments);
- li(a1, Operand(ExternalReference(f, isolate())));
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(1);
CallStub(&stub);
}
@@ -4020,8 +4190,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- li(a0, Operand(function->nargs));
- li(a1, Operand(ExternalReference(function, isolate())));
+ PrepareCEntryArgs(function->nargs);
+ PrepareCEntryFunction(ExternalReference(function, isolate()));
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -4033,12 +4203,13 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- li(a0, Operand(num_arguments));
- li(a1, Operand(ext));
+ int num_arguments,
+ BranchDelaySlot bd) {
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ext);
CEntryStub stub(1);
- CallStub(&stub);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -4049,7 +4220,7 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- li(a0, Operand(num_arguments));
+ PrepareCEntryArgs(num_arguments);
JumpToExternalReference(ext);
}
@@ -4063,10 +4234,16 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- li(a1, Operand(builtin));
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd) {
+ PrepareCEntryFunction(builtin);
CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ al,
+ zero_reg,
+ Operand(zero_reg),
+ bd);
}
@@ -4325,7 +4502,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()));
+ li(t9, Operand(CodeObject()), CONSTANT_SIZE);
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
@@ -4369,7 +4546,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
+ // Accessed from ExitFrame::code_slot.
+ li(t8, Operand(CodeObject()), CONSTANT_SIZE);
sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
@@ -4413,7 +4591,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -4439,11 +4618,17 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(sp, fp); // Respect ABI stack constraint.
lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
- addiu(sp, sp, 8);
+
if (argument_count.is_valid()) {
sll(t8, argument_count, kPointerSizeLog2);
addu(sp, sp, t8);
}
+
+ if (do_return) {
+ Ret(USE_DELAY_SLOT);
+ // If returning, the instruction in the delay slot will be the addiu below.
+ }
+ addiu(sp, sp, 8);
}
@@ -5088,7 +5273,7 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
Label not_smi;
JumpIfNotSmi(descriptors, &not_smi);
- li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+ LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
bind(&not_smi);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 6ae8657e1e..f57418f386 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -81,6 +81,16 @@ enum BranchDelaySlot {
PROTECT
};
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+ // If the constant value can be represented in just 16 bits, then
+ // optimize the li to use a single instruction, rather than lui/ori pair.
+ OPTIMIZE_SIZE = 0,
+ // Always use 2 instructions (lui/ori pair), even if the constant could
+ // be loaded with just one, so that this value is patchable later.
+ CONSTANT_SIZE = 1
+};
+
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
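The two LiFlags modes trade size for patchability: OPTIMIZE_SIZE lets li collapse to a single instruction when the immediate allows it, while CONSTANT_SIZE always emits the lui/ori pair so later patching (code targets, OSR addresses) finds a fixed-size site. A simplified instruction-count model of the dispatch in li; the real li also has single-instruction ori/lui encodings elided from the hunk shown earlier:

    #include <cstdint>

    enum LiFlags { OPTIMIZE_SIZE = 0, CONSTANT_SIZE = 1 };

    int LiInstructionCount(int32_t imm, LiFlags mode, bool needs_reloc) {
      if (!needs_reloc && mode == OPTIMIZE_SIZE) {
        if (imm >= -32768 && imm <= 32767) return 1;  // addiu rd, zero_reg, imm
        return 2;                                     // lui + ori
      }
      return 2;  // relocatable or CONSTANT_SIZE: always the patchable pair
    }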
@@ -184,6 +194,12 @@ class MacroAssembler: public Assembler {
Ret(cond, rs, rt, bd);
}
+ void Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot = PROTECT);
+
#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
@@ -193,10 +209,14 @@ class MacroAssembler: public Assembler {
Register reg = no_reg,
const Operand& op = Operand(no_reg));
- void DropAndRet(int drop = 0,
- Condition cond = cc_always,
- Register reg = no_reg,
- const Operand& op = Operand(no_reg));
+ // Trivial case of DropAndRet that utilizes the delay slot and emits only
+ // 2 instructions.
+ void DropAndRet(int drop);
+
+ void DropAndRet(int drop,
+ Condition cond,
+ Register reg,
+ const Operand& op);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
@@ -226,7 +246,14 @@ class MacroAssembler: public Assembler {
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
}
+ // Conditional move.
void Move(FPURegister dst, double imm);
+ void Movz(Register rd, Register rs, Register rt);
+ void Movn(Register rd, Register rs, Register rt);
+ void Movt(Register rd, Register rs, uint16_t cc = 0);
+ void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+ void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it used by v8, for example in
@@ -237,7 +264,6 @@ class MacroAssembler: public Assembler {
Branch(L);
}
-
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -568,12 +594,13 @@ class MacroAssembler: public Assembler {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
// Load int32 in the rd register.
- void li(Register rd, Operand j, bool gen2instr = false);
- inline void li(Register rd, int32_t j, bool gen2instr = false) {
- li(rd, Operand(j), gen2instr);
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
}
- inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
- li(dst, Operand(value), gen2instr);
+ inline void li(Register dst, Handle<Object> value,
+ LiFlags mode = OPTIMIZE_SIZE) {
+ li(dst, Operand(value), mode);
}
// Push multiple registers on the stack.
@@ -692,6 +719,10 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ void Trunc_w_d(FPURegister fd, FPURegister fs);
+ void Round_w_d(FPURegister fd, FPURegister fs);
+ void Floor_w_d(FPURegister fd, FPURegister fs);
+ void Ceil_w_d(FPURegister fd, FPURegister fs);
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
@@ -762,7 +793,9 @@ class MacroAssembler: public Assembler {
int stack_space = 0);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register arg_count,
+ bool do_return = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -934,7 +967,8 @@ class MacroAssembler: public Assembler {
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
+ // the FastDoubleElements array elements. Otherwise jump to fail, in which
+ // case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
@@ -1072,9 +1106,22 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// Runtime calls.
+ // See comments at the beginning of CEntryStub::Generate.
+ inline void PrepareCEntryArgs(int num_args) {
+ li(s0, num_args);
+ li(s1, (num_args - 1) * kPointerSize);
+ }
+
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(s2, Operand(ref));
+ }
+
// Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void CallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
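The PrepareCEntryArgs/PrepareCEntryFunction helpers above pin down the CEntry calling convention in one place: s0 carries the argument count, s1 the precomputed (num_args - 1) * kPointerSize byte offset used to address the argument area, and s2 the C function to invoke. A host-side restatement of that bookkeeping, as a sketch only:

    static const int kPointerSize = 4;  // 32-bit MIPS

    struct CEntryArgs {
      int s0_num_args;          // li(s0, num_args)
      int s1_offset_bytes;      // li(s1, (num_args - 1) * kPointerSize)
      const void* s2_function;  // li(s2, Operand(ref))
    };

    CEntryArgs PrepareCEntry(int num_args, const void* function_ref) {
      return CEntryArgs{num_args, (num_args - 1) * kPointerSize, function_ref};
    }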
@@ -1090,7 +1137,8 @@ class MacroAssembler: public Assembler {
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
+ int num_arguments,
+ BranchDelaySlot bd = PROTECT);
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
@@ -1156,7 +1204,8 @@ class MacroAssembler: public Assembler {
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to the builtin routine.
- void JumpToExternalReference(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd = PROTECT);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 330ff2b8d1..ae4da936ce 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -386,7 +386,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Restore regexp engine registers.
__ MultiPop(regexp_registers_to_retain);
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Check if function returned non-zero for success or zero for failure.
@@ -678,7 +678,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// string, and store that value in a local variable.
__ mov(t5, a1);
__ li(a1, Operand(1));
- __ movn(a1, zero_reg, t5);
+ __ Movn(a1, zero_reg, t5);
__ sw(a1, MemOperand(frame_pointer(), kAtStart));
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -698,7 +698,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register.
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
// Load previous char as initial value of current character register.
Label at_start;
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
@@ -783,7 +783,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
SafeReturn();
}
@@ -813,7 +813,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
// Restore saved registers and continue.
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
@@ -1010,7 +1010,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
__ PrepareCallCFunction(num_arguments, scratch);
__ mov(a2, frame_pointer());
// Code* of self.
- __ li(a1, Operand(masm_->CodeObject()));
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// a0 becomes return address pointer.
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
@@ -1229,7 +1229,7 @@ void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
if (OS::ActivationFrameAlignment() != 0) {
__ lw(sp, MemOperand(sp, 16));
}
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
}
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index ba625f45ba..1e72939876 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -309,6 +309,14 @@ class Simulator {
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
void BranchDelayInstructionDecode(Instruction* instr) {
+ if (instr->InstructionBits() == nopInstr) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ set_register(pc, reinterpret_cast<int32_t>(instr) +
+ Instruction::kInstrSize);
+ return;
+ }
+
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index fde5ba994c..294bc0a074 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -577,8 +577,8 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ li(a0, Operand(5));
- __ li(a1, Operand(ref));
+ __ PrepareCEntryArgs(5);
+ __ PrepareCEntryFunction(ref);
CEntryStub stub(1);
__ CallStub(&stub);
@@ -943,7 +943,7 @@ static void StoreIntAsFloat(MacroAssembler* masm,
__ And(fval, ival, Operand(kBinary32SignMask));
// Negate value if it is negative.
__ subu(scratch1, zero_reg, ival);
- __ movn(ival, scratch1, fval);
+ __ Movn(ival, scratch1, fval);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -957,14 +957,14 @@ static void StoreIntAsFloat(MacroAssembler* masm,
__ Xor(scratch1, ival, Operand(1));
__ li(scratch2, exponent_word_for_1);
__ or_(scratch2, fval, scratch2);
- __ movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
+ __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
__ Branch(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
- __ clz(zeros, ival);
+ __ Clz(zeros, ival);
// Compute exponent and or it into the exponent register.
__ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
@@ -1394,14 +1394,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ lw(a0, MemOperand(sp, argc * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(a0, miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(a0, miss);
CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
}
@@ -2819,14 +2813,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(a0, &miss);
- }
-
// Check that the map of the global has not changed.
+ __ JumpIfSmi(a0, &miss);
CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
// Get the value from the cell.
@@ -3635,7 +3623,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ li(t0, 0x7ff);
__ Xor(t1, t5, Operand(0xFF));
- __ movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
+ __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
__ Branch(&exponent_rebiased, eq, t0, Operand(0xff));
// Rebias exponent.
@@ -3929,7 +3917,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ xor_(t1, t6, t5);
__ li(t2, kBinary32ExponentMask);
- __ movz(t6, t2, t1); // Only if t6 is equal to t5.
+ __ Movz(t6, t2, t1); // Only if t6 is equal to t5.
__ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
// Rebias exponent.
@@ -3942,12 +3930,12 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ Slt(t1, t1, t6);
__ And(t2, t3, Operand(HeapNumber::kSignMask));
__ Or(t2, t2, Operand(kBinary32ExponentMask));
- __ movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
+ __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
__ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
__ Slt(t1, t6, Operand(kBinary32MinExponent));
__ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
+ __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
__ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
__ And(t7, t3, Operand(HeapNumber::kSignMask));
@@ -3997,11 +3985,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// and infinities. All these should be converted to 0.
__ li(t5, HeapNumber::kExponentMask);
__ and_(t6, t3, t5);
- __ movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
+ __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
__ Branch(&done, eq, t6, Operand(zero_reg));
__ xor_(t2, t6, t5);
- __ movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
+ __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
__ Branch(&done, eq, t6, Operand(t5));
// Unbias exponent.
@@ -4009,13 +3997,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
// If exponent is negative then result is 0.
__ slt(t2, t6, zero_reg);
- __ movn(t3, zero_reg, t2); // Only if exponent is negative.
+ __ Movn(t3, zero_reg, t2); // Only if exponent is negative.
__ Branch(&done, lt, t6, Operand(zero_reg));
// If exponent is too big then result is minimal value.
__ slti(t1, t6, meaningfull_bits - 1);
__ li(t2, min_value);
- __ movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
+ __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
__ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
__ And(t5, t3, Operand(HeapNumber::kSignMask));
@@ -4026,7 +4014,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ subu(t6, t9, t6);
__ slt(t1, t6, zero_reg);
__ srlv(t2, t3, t6);
- __ movz(t3, t2, t1); // Only if t6 is positive.
+ __ Movz(t3, t2, t1); // Only if t6 is positive.
__ Branch(&sign, ge, t6, Operand(zero_reg));
__ subu(t6, zero_reg, t6);
@@ -4038,7 +4026,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ bind(&sign);
__ subu(t2, t3, zero_reg);
- __ movz(t3, t2, t5); // Only if t5 is zero.
+ __ Movz(t3, t2, t5); // Only if t5 is zero.
__ bind(&done);
@@ -4119,7 +4107,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(a0, &miss_force_generic);
+ __ JumpIfNotSmi(a0, &miss_force_generic, at, USE_DELAY_SLOT);
+ // The delay slot can be safely used here; a1 is an object pointer.
// Get the elements array.
__ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4127,7 +4116,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// Check that the key is within bounds.
__ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+ __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
// Load the result and make sure it's not the hole.
__ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4137,8 +4126,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
__ lw(t0, MemOperand(t0));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ Branch(&miss_force_generic, eq, t0, Operand(t1));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
- __ Ret();
__ bind(&miss_force_generic);
Handle<Code> stub =
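These rewrites exploit the MIPS branch delay slot: the instruction issued immediately after a branch or jump executes before control transfers, so a slot that would otherwise hold a padding nop can do useful work. The return sequence above is the clearest case; schematically:

    // Before: the move, then jr ra followed by a padding nop (three instructions).
    __ mov(v0, t0);
    __ Ret();
    // After: the move rides in the delay slot of jr ra (two instructions).
    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, t0);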
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 168a12d3aa..c43dd228ec 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1832,6 +1832,11 @@ ScriptMirror.prototype.source = function() {
};
+ScriptMirror.prototype.setSource = function(source) {
+ %DebugSetScriptSource(this.script_, source);
+};
+
+
ScriptMirror.prototype.lineOffset = function() {
return this.script_.line_offset;
};
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 16e03e7548..8eefb23db2 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -532,8 +532,9 @@ void Oddball::OddballVerify() {
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
- ASSERT(value <= 1);
// Hidden oddballs have negative smis.
+ const int kLeastHiddenOddballNumber = -4;
+ ASSERT(value <= 1);
ASSERT(value >= kLeastHiddenOddballNumber);
}
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index d0e9bf82ba..78578cc884 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -94,6 +94,15 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
+// Getter that returns a tagged Smi and setter that writes a tagged Smi.
+#define ACCESSORS_TO_SMI(holder, name, offset) \
+ Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ }
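The difference between the two macros is the type at the call boundary: ACCESSORS_TO_SMI hands the tagged Smi* straight through, while SMI_ACCESSORS below converts to and from a plain int. A sketch of what the new expansion gives callers, using Script::line_offset as converted later in this patch:

    // ACCESSORS_TO_SMI: the caller deals in tagged values.
    Smi* offset = script->line_offset();
    script->set_line_offset(Smi::FromInt(0));
    // SMI_ACCESSORS would instead expose: int line_offset(); void set_line_offset(int);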
+
+
+// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS(holder, name, offset) \
int holder::name() { \
Object* value = READ_FIELD(this, offset); \
@@ -935,6 +944,12 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INT64_FIELD(p, offset) \
+ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT64_FIELD(p, offset, value) \
+ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_SHORT_FIELD(p, offset) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
@@ -1692,6 +1707,12 @@ double FixedDoubleArray::get_scalar(int index) {
return result;
}
+int64_t FixedDoubleArray::get_representation(int index) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ ASSERT(index >= 0 && index < this->length());
+ return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
+}
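get_representation reads the raw 64-bit bit pattern rather than a double, which matters because the array encodes holes as a special NaN: a double-typed read followed by a write could canonicalize the NaN and silently turn a hole into an ordinary number. A sketch of the intended use (the raw store here is illustrative; the new elements.cc copy path is assumed to do the equivalent):

    // Copy one element without disturbing NaN payloads, hole included.
    int64_t bits = from->get_representation(i);
    WRITE_INT64_FIELD(to, FixedDoubleArray::kHeaderSize + i * kDoubleSize, bits);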
MaybeObject* FixedDoubleArray::get(int index) {
if (is_the_hole(index)) {
@@ -1725,65 +1746,6 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
-void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
- int old_length = from->length();
- ASSERT(old_length < length());
- if (old_length * kDoubleSize >= OS::kMinComplexMemCopy) {
- OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
- FIELD_ADDR(from, kHeaderSize),
- old_length * kDoubleSize);
- } else {
- for (int i = 0; i < old_length; ++i) {
- if (from->is_the_hole(i)) {
- set_the_hole(i);
- } else {
- set(i, from->get_scalar(i));
- }
- }
- }
- int offset = kHeaderSize + old_length * kDoubleSize;
- for (int current = from->length(); current < length(); ++current) {
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
- offset += kDoubleSize;
- }
-}
-
-
-void FixedDoubleArray::Initialize(FixedArray* from) {
- int old_length = from->length();
- ASSERT(old_length <= length());
- for (int i = 0; i < old_length; i++) {
- Object* hole_or_object = from->get(i);
- if (hole_or_object->IsTheHole()) {
- set_the_hole(i);
- } else {
- set(i, hole_or_object->Number());
- }
- }
- int offset = kHeaderSize + old_length * kDoubleSize;
- for (int current = from->length(); current < length(); ++current) {
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
- offset += kDoubleSize;
- }
-}
-
-
-void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
- int offset = kHeaderSize;
- for (int current = 0; current < length(); ++current) {
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
- offset += kDoubleSize;
- }
- for (int i = 0; i < from->Capacity(); i++) {
- Object* key = from->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- set(entry, from->ValueAt(i)->Number());
- }
- }
-}
-
-
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
Heap* heap = GetHeap();
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
@@ -3454,7 +3416,7 @@ ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -3495,7 +3457,7 @@ ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
kInstanceCallHandlerOffset)
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
-ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -3509,17 +3471,18 @@ ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
ACCESSORS(Script, id, Object, kIdOffset)
-ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
-ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
+ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
+ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
-ACCESSORS(Script, type, Smi, kTypeOffset)
-ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
+ACCESSORS_TO_SMI(Script, type, kTypeOffset)
+ACCESSORS_TO_SMI(Script, compilation_type, kCompilationTypeOffset)
+ACCESSORS_TO_SMI(Script, compilation_state, kCompilationStateOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS(Script, eval_from_instructions_offset, Smi,
- kEvalFrominstructionsOffsetOffset)
+ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
+ kEvalFrominstructionsOffsetOffset)
#ifdef ENABLE_DEBUGGER_SUPPORT
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -3527,9 +3490,9 @@ ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
-ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
-ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
@@ -4167,7 +4130,7 @@ ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
-
+INT_ACCESSORS(Code, ic_age, kICAgeOffset)
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
@@ -4463,7 +4426,11 @@ bool StringHasher::has_trivial_hash() {
}
-void StringHasher::AddCharacter(uc32 c) {
+void StringHasher::AddCharacter(uint32_t c) {
+ if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ AddSurrogatePair(c); // Not inlined.
+ return;
+ }
// Use the Jenkins one-at-a-time hash function to update the hash
// for the given character.
raw_running_hash_ += c;
@@ -4492,8 +4459,12 @@ void StringHasher::AddCharacter(uc32 c) {
}
-void StringHasher::AddCharacterNoIndex(uc32 c) {
+void StringHasher::AddCharacterNoIndex(uint32_t c) {
ASSERT(!is_array_index());
+ if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ AddSurrogatePairNoIndex(c); // Not inlined.
+ return;
+ }
raw_running_hash_ += c;
raw_running_hash_ += (raw_running_hash_ << 10);
raw_running_hash_ ^= (raw_running_hash_ >> 6);
@@ -4742,6 +4713,7 @@ void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
+ // - IncrementalMarking::Step
ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
}
@@ -4933,22 +4905,27 @@ void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
#undef SLOT_ADDR
-
+#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
-#undef SMI_ACCESSORS
#undef ACCESSORS
+#undef ACCESSORS_TO_SMI
+#undef SMI_ACCESSORS
+#undef BOOL_GETTER
+#undef BOOL_ACCESSORS
#undef FIELD_ADDR
#undef READ_FIELD
#undef WRITE_FIELD
#undef WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
-#undef READ_MEMADDR_FIELD
-#undef WRITE_MEMADDR_FIELD
#undef READ_DOUBLE_FIELD
#undef WRITE_DOUBLE_FIELD
#undef READ_INT_FIELD
#undef WRITE_INT_FIELD
+#undef READ_INTPTR_FIELD
+#undef WRITE_INTPTR_FIELD
+#undef READ_UINT32_FIELD
+#undef WRITE_UINT32_FIELD
#undef READ_SHORT_FIELD
#undef WRITE_SHORT_FIELD
#undef READ_BYTE_FIELD
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index e0a95372b4..64d85a0685 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -288,7 +288,7 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
bool has_pending_exception;
Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception);
+ Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *result;
@@ -4324,21 +4324,20 @@ void JSObject::LookupCallback(String* name, LookupResult* result) {
static bool UpdateGetterSetterInDictionary(
SeededNumberDictionary* dictionary,
uint32_t index,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
- // TODO(mstarzinger): We should check for details.IsDontDelete() here once
- // we only call into the runtime once to set both getter and setter.
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
+ ASSERT(!details.IsDontDelete());
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(entry,
PropertyDetails(attributes, CALLBACKS, index));
}
- AccessorPair::cast(result)->set(component, fun);
+ AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
}
}
@@ -4347,8 +4346,8 @@ static bool UpdateGetterSetterInDictionary(
MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
@@ -4369,8 +4368,8 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
case DICTIONARY_ELEMENTS:
if (UpdateGetterSetterInDictionary(element_dictionary(),
index,
- component,
- fun,
+ getter,
+ setter,
attributes)) {
return GetHeap()->undefined_value();
}
@@ -4390,8 +4389,8 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
SeededNumberDictionary::cast(arguments);
if (UpdateGetterSetterInDictionary(dictionary,
index,
- component,
- fun,
+ getter,
+ setter,
attributes)) {
return GetHeap()->undefined_value();
}
@@ -4405,23 +4404,22 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
{ MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
if (!maybe_accessors->To(&accessors)) return maybe_accessors;
}
- accessors->set(component, fun);
+ accessors->SetComponents(getter, setter);
return SetElementCallback(index, accessors, attributes);
}
MaybeObject* JSObject::DefinePropertyAccessor(String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
// Lookup the name.
LookupResult result(GetHeap()->isolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsFound()) {
- // TODO(mstarzinger): We should check for result.IsDontDelete() here once
- // we only call into the runtime once to set both getter and setter.
if (result.type() == CALLBACKS) {
+ ASSERT(!result.IsDontDelete());
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
if (obj->IsAccessorPair()) {
@@ -4430,7 +4428,7 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
AccessorPair::cast(obj)->CopyWithoutTransitions();
if (!maybe_copy->To(&copy)) return maybe_copy;
}
- copy->set(component, fun);
+ copy->SetComponents(getter, setter);
// Use set to update attributes.
return SetPropertyCallback(name, copy, attributes);
}
@@ -4441,7 +4439,7 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
{ MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
if (!maybe_accessors->To(&accessors)) return maybe_accessors;
}
- accessors->set(component, fun);
+ accessors->SetComponents(getter, setter);
return SetPropertyCallback(name, accessors, attributes);
}
@@ -4512,12 +4510,6 @@ MaybeObject* JSObject::SetElementCallback(uint32_t index,
MaybeObject* JSObject::SetPropertyCallback(String* name,
Object* structure,
PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-
- bool convert_back_to_fast = HasFastProperties() &&
- (map()->instance_descriptors()->number_of_descriptors()
- < DescriptorArray::kMaxNumberOfDescriptors);
-
// Normalize object to make this operation simple.
{ MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (maybe_ok->IsFailure()) return maybe_ok;
@@ -4538,22 +4530,29 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
}
// Update the dictionary with the new CALLBACKS property.
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
{ MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details);
if (maybe_ok->IsFailure()) return maybe_ok;
}
- if (convert_back_to_fast) {
- MaybeObject* maybe_ok = TransformToFastProperties(0);
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
return GetHeap()->undefined_value();
}
+
+void JSObject::DefineAccessor(Handle<JSObject> object,
+ Handle<String> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->DefineAccessor(*name, *getter, *setter, attributes));
+}
+
MaybeObject* JSObject::DefineAccessor(String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
- ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -4566,8 +4565,8 @@ MaybeObject* JSObject::DefineAccessor(String* name,
Object* proto = GetPrototype();
if (proto->IsNull()) return this;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(name, component,
- fun, attributes);
+ return JSObject::cast(proto)->DefineAccessor(
+ name, getter, setter, attributes);
}
// Make sure that the top context does not change when doing callbacks or
@@ -4581,8 +4580,8 @@ MaybeObject* JSObject::DefineAccessor(String* name,
uint32_t index = 0;
return name->AsArrayIndex(&index) ?
- DefineElementAccessor(index, component, fun, attributes) :
- DefinePropertyAccessor(name, component, fun, attributes);
+ DefineElementAccessor(index, getter, setter, attributes) :
+ DefinePropertyAccessor(name, getter, setter, attributes);
}
@@ -4696,7 +4695,7 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
Object* element = dictionary->ValueAt(entry);
if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
element->IsAccessorPair()) {
- return AccessorPair::cast(element)->SafeGet(component);
+ return AccessorPair::cast(element)->GetComponent(component);
}
}
}
@@ -4712,7 +4711,7 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->SafeGet(component);
+ return AccessorPair::cast(obj)->GetComponent(component);
}
}
}
@@ -5949,8 +5948,8 @@ MaybeObject* AccessorPair::CopyWithoutTransitions() {
}
-Object* AccessorPair::SafeGet(AccessorComponent component) {
- Object* accessor = get(component);
+Object* AccessorPair::GetComponent(AccessorComponent component) {
+ Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
}
@@ -6051,9 +6050,11 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
buffer->Reset(offset, this);
int character_position = offset;
int utf8_bytes = 0;
+ int last = unibrow::Utf16::kNoPreviousCharacter;
while (buffer->has_more() && character_position++ < offset + length) {
uint16_t character = buffer->GetNext();
- utf8_bytes += unibrow::Utf8::Length(character);
+ utf8_bytes += unibrow::Utf8::Length(character, last);
+ last = character;
}
if (length_return) {
@@ -6067,13 +6068,15 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
buffer->Seek(offset);
character_position = offset;
int utf8_byte_position = 0;
+ last = unibrow::Utf16::kNoPreviousCharacter;
while (buffer->has_more() && character_position++ < offset + length) {
uint16_t character = buffer->GetNext();
if (allow_nulls == DISALLOW_NULLS && character == 0) {
character = ' ';
}
utf8_byte_position +=
- unibrow::Utf8::Encode(result + utf8_byte_position, character);
+ unibrow::Utf8::Encode(result + utf8_byte_position, character, last);
+ last = character;
}
result[utf8_byte_position] = 0;
return SmartArrayPointer<char>(result);
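Threading last through Length and Encode lets the converter recognize a trail surrogate that follows a lead surrogate and emit the combined code point as a single four-byte UTF-8 sequence rather than two three-byte sequences (CESU-8 style). A worked example, assuming the standard surrogate ranges:

    int last = unibrow::Utf16::kNoPreviousCharacter;
    int bytes = unibrow::Utf8::Length(0xD801, last);  // 3: lone lead surrogate so far
    last = 0xD801;
    bytes += unibrow::Utf8::Length(0xDC37, last);     // +1: the pair U+10437 totals 4 bytes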
@@ -6387,73 +6390,6 @@ const unibrow::byte* String::ReadBlock(String* input,
}
-// This method determines the type of string involved and then gets the UTF8
-// length of the string. It doesn't flatten the string and has log(n) recursion
-// for a string of length n.
-int String::Utf8Length(String* input, int from, int to) {
- if (from == to) return 0;
- int total = 0;
- while (true) {
- if (input->IsAsciiRepresentation()) return total + to - from;
- switch (StringShape(input).representation_tag()) {
- case kConsStringTag: {
- ConsString* str = ConsString::cast(input);
- String* first = str->first();
- String* second = str->second();
- int first_length = first->length();
- if (first_length - from < to - first_length) {
- if (first_length > from) {
- // Left hand side is shorter.
- total += Utf8Length(first, from, first_length);
- input = second;
- from = 0;
- to -= first_length;
- } else {
- // We only need the right hand side.
- input = second;
- from -= first_length;
- to -= first_length;
- }
- } else {
- if (first_length <= to) {
- // Right hand side is shorter.
- total += Utf8Length(second, 0, to - first_length);
- input = first;
- to = first_length;
- } else {
- // We only need the left hand side.
- input = first;
- }
- }
- continue;
- }
- case kExternalStringTag:
- case kSeqStringTag: {
- Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
- const uc16* p = vector.start();
- for (int i = from; i < to; i++) {
- total += unibrow::Utf8::Length(p[i]);
- }
- return total;
- }
- case kSlicedStringTag: {
- SlicedString* str = SlicedString::cast(input);
- int offset = str->offset();
- input = str->parent();
- from += offset;
- to += offset;
- continue;
- }
- default:
- break;
- }
- UNREACHABLE();
- return 0;
- }
- return 0;
-}
-
-
void Relocatable::PostGarbageCollectionProcessing() {
Isolate* isolate = Isolate::Current();
Relocatable* current = isolate->relocatable_top();
@@ -6847,8 +6783,10 @@ static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
// General slow case check. We know that the ia and ib iterators
// have the same length.
while (ia->has_more()) {
- uc32 ca = ia->GetNext();
- uc32 cb = ib->GetNext();
+ uint32_t ca = ia->GetNext();
+ uint32_t cb = ib->GetNext();
+ ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode);
+ ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode);
if (ca != cb)
return false;
}
@@ -7031,8 +6969,14 @@ bool String::IsEqualTo(Vector<const char> str) {
decoder->Reset(str.start(), str.length());
int i;
for (i = 0; i < slen && decoder->has_more(); i++) {
- uc32 r = decoder->GetNext();
- if (Get(i) != r) return false;
+ uint32_t r = decoder->GetNext();
+ if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ if (i >= slen - 1) return false;
+ if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
+ if (Get(i) != unibrow::Utf16::TrailSurrogate(r)) return false;
+ } else {
+ if (Get(i) != r) return false;
+ }
}
return i == slen && !decoder->has_more();
}
@@ -7162,6 +7106,22 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
}
+void StringHasher::AddSurrogatePair(uc32 c) {
+ uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
+ AddCharacter(lead);
+ uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
+ AddCharacter(trail);
+}
+
+
+void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
+ uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
+ AddCharacterNoIndex(lead);
+ uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
+ AddCharacterNoIndex(trail);
+}
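Both helpers split a supplementary code point into its two UTF-16 code units before hashing, so a symbol built from UTF-8 input hashes the same as the equivalent two-byte string. The split follows the standard UTF-16 formulas, which unibrow's LeadSurrogate/TrailSurrogate are assumed to implement:

    // For c in [0x10000, 0x10FFFF]:
    uint16_t lead  = 0xD800 + (((c - 0x10000) >> 10) & 0x3FF);
    uint16_t trail = 0xDC00 + ((c - 0x10000) & 0x3FF);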
+
+
uint32_t StringHasher::GetHashField() {
ASSERT(is_valid());
if (length_ <= String::kMaxHashCalcLength) {
@@ -7490,7 +7450,7 @@ bool JSFunction::IsInlineable() {
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
- ASSERT(value->IsJSObject());
+ ASSERT(value->IsJSReceiver());
Heap* heap = GetHeap();
if (has_initial_map()) {
// If the function has allocated the initial map
@@ -7517,11 +7477,11 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
ASSERT(should_have_prototype());
Object* construct_prototype = value;
- // If the value is not a JSObject, store the value in the map's
+ // If the value is not a JSReceiver, store the value in the map's
// constructor field so it can be accessed. Also, set the prototype
// used for constructing objects to the original object prototype.
// See ECMA-262 13.2.2.
- if (!value->IsJSObject()) {
+ if (!value->IsJSReceiver()) {
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
@@ -8487,7 +8447,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
if (!maybe->To(&new_map)) return maybe;
}
- FixedArrayBase* old_elements_raw = elements();
+ FixedArrayBase* old_elements = elements();
ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
@@ -8498,12 +8458,12 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
set_map_and_elements(new_map, new_elements);
} else {
- FixedArray* parameter_map = FixedArray::cast(old_elements_raw);
+ FixedArray* parameter_map = FixedArray::cast(old_elements);
parameter_map->set(1, new_elements);
}
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements_raw,
+ PrintElementsTransition(stdout, elements_kind, old_elements,
GetElementsKind(), new_elements);
}
@@ -8536,27 +8496,14 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
FixedArrayBase* old_elements = elements();
- ElementsKind elements_kind(GetElementsKind());
- AssertNoAllocation no_gc;
- if (old_elements->length() != 0) {
- switch (elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
- elems->Initialize(FixedArray::cast(old_elements));
- break;
- }
- case FAST_DOUBLE_ELEMENTS: {
- elems->Initialize(FixedDoubleArray::cast(old_elements));
- break;
- }
- case DICTIONARY_ELEMENTS: {
- elems->Initialize(SeededNumberDictionary::cast(old_elements));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ ElementsKind elements_kind = GetElementsKind();
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+ accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+ if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
+ set_map_and_elements(new_map, elems);
+ } else {
+ FixedArray* parameter_map = FixedArray::cast(old_elements);
+ parameter_map->set(1, elems);
}
if (FLAG_trace_elements_transitions) {
@@ -8564,11 +8511,6 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
FAST_DOUBLE_ELEMENTS, elems);
}
- ASSERT(new_map->has_fast_double_elements());
- set_map(new_map);
- ASSERT(elems->IsFixedDoubleArray());
- set_elements(elems);
-
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
}
@@ -10655,7 +10597,7 @@ class Utf8SymbolKey : public HashTableKey {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
unibrow::Utf8InputBuffer<> buffer(string_.start(),
static_cast<unsigned>(string_.length()));
- chars_ = buffer.Length();
+ chars_ = buffer.Utf16Length();
hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 7906d14fa3..a9cb8e0db6 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -293,6 +293,8 @@ const int kVariableSizeSentinel = 0;
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
V(POLYMORPHIC_CODE_CACHE_TYPE) \
+ V(TYPE_FEEDBACK_INFO_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
@@ -1625,9 +1627,14 @@ class JSObject: public JSReceiver {
String* name,
bool continue_search);
+ static void DefineAccessor(Handle<JSObject> object,
+ Handle<String> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes);
Object* LookupAccessor(String* name, AccessorComponent component);
@@ -2176,13 +2183,13 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* DefineElementAccessor(
uint32_t index,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes);
void LookupInDescriptor(String* name, LookupResult* result);
@@ -2336,12 +2343,9 @@ class FixedArray: public FixedArrayBase {
// FixedDoubleArray describes fixed-sized arrays with element type double.
class FixedDoubleArray: public FixedArrayBase {
public:
- inline void Initialize(FixedArray* from);
- inline void Initialize(FixedDoubleArray* from);
- inline void Initialize(SeededNumberDictionary* from);
-
// Setter and getter for elements.
inline double get_scalar(int index);
+ inline int64_t get_representation(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -4176,6 +4180,11 @@ class Code: public HeapObject {
// it is only used by the garbage collector itself.
DECL_ACCESSORS(gc_metadata, Object)
+ // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
+ // at the moment when this object was created.
+ inline void set_ic_age(int count);
+ inline int ic_age();
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline FixedArray* unchecked_deoptimization_data();
@@ -4428,8 +4437,9 @@ class Code: public HeapObject {
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
- static const int kFlagsOffset = kGCMetadataOffset + kPointerSize;
-
+ static const int kICAgeOffset =
+ kGCMetadataOffset + kPointerSize;
+ static const int kFlagsOffset = kICAgeOffset + kIntSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlagsSize = 2 * kIntSize;
@@ -4976,6 +4986,12 @@ class Script: public Struct {
COMPILATION_TYPE_EVAL = 1
};
+ // Script compilation state.
+ enum CompilationState {
+ COMPILATION_STATE_INITIAL = 0,
+ COMPILATION_STATE_COMPILED = 1
+ };
+
// [source]: the script source.
DECL_ACCESSORS(source, Object)
@@ -5007,6 +5023,9 @@ class Script: public Struct {
// [compilation_type]: how the script was compiled.
DECL_ACCESSORS(compilation_type, Smi)
+ // [compilation_state]: whether the script has already been compiled.
+ DECL_ACCESSORS(compilation_state, Smi)
+
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
@@ -5043,7 +5062,9 @@ class Script: public Struct {
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
- static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
+ static const int kCompilationStateOffset =
+ kCompilationTypeOffset + kPointerSize;
+ static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
@@ -6616,12 +6637,17 @@ class StringHasher {
inline bool has_trivial_hash();
// Add a character to the hash and update the array index calculation.
- inline void AddCharacter(uc32 c);
+ inline void AddCharacter(uint32_t c);
// Adds a character to the hash but does not update the array index
// calculation. This can only be called when it has been verified
// that the input is not an array index.
- inline void AddCharacterNoIndex(uc32 c);
+ inline void AddCharacterNoIndex(uint32_t c);
+
+ // Add a character above 0xffff as a surrogate pair. These can get into
+ // the hasher through the routines that take a UTF-8 string and make a symbol.
+ void AddSurrogatePair(uc32 c);
+ void AddSurrogatePairNoIndex(uc32 c);
// Returns the value to store in the hash field of a string with
// the given length and contents.
@@ -6871,9 +6897,6 @@ class String: public HeapObject {
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
int* length_output = 0);
- inline int Utf8Length() { return Utf8Length(this, 0, length()); }
- static int Utf8Length(String* input, int from, int to);
-
// Return a 16 bit Unicode representation of the string.
// The string should be nearly flat, otherwise the performance of
// of this method may be very bad. Setting robustness_flag to
@@ -6939,7 +6962,7 @@ class String: public HeapObject {
// Max ASCII char code.
static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
- static const int kMaxUC16CharCode = 0xffff;
+ static const int kMaxUtf16CodeUnit = 0xffff;
// Mask constant for checking if a string has a computed hash code
// and if it is an array index. The least significant bit indicates
@@ -7570,9 +7593,6 @@ class Oddball: public HeapObject {
static const byte kUndefined = 5;
static const byte kOther = 6;
- // The ToNumber value of a hidden oddball is a negative smi.
- static const int kLeastHiddenOddballNumber = -5;
-
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;
@@ -8037,23 +8057,15 @@ class AccessorPair: public Struct {
MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
- Object* get(AccessorComponent component) {
- ASSERT(component == ACCESSOR_GETTER || component == ACCESSOR_SETTER);
- return (component == ACCESSOR_GETTER) ? getter() : setter();
- }
+ // Note: Returns undefined instead of the hole.
+ Object* GetComponent(AccessorComponent component);
- void set(AccessorComponent component, Object* value) {
- ASSERT(component == ACCESSOR_GETTER || component == ACCESSOR_SETTER);
- if (component == ACCESSOR_GETTER) {
- set_getter(value);
- } else {
- set_setter(value);
- }
+ // Set both components, skipping arguments which are a JavaScript null.
+ void SetComponents(Object* getter, Object* setter) {
+ if (!getter->IsNull()) set_getter(getter);
+ if (!setter->IsNull()) set_setter(setter);
}
- // Same as get, but returns undefined instead of the hole.
- Object* SafeGet(AccessorComponent component);
-
bool ContainsAccessor() {
return IsJSAccessor(getter()) || IsJSAccessor(setter());
}
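SetComponents treats a JavaScript null argument as "leave this side unchanged", which is what allows a getter and a setter to be installed with one runtime call instead of the old one-component set(). A sketch of the resulting behavior (pair, getter_fn and setter_fn are assumed handles):

    pair->SetComponents(getter_fn, heap->null_value());  // updates the getter only
    pair->SetComponents(heap->null_value(), setter_fn);  // updates the setter only
    pair->SetComponents(getter_fn, setter_fn);           // updates both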
diff --git a/deps/v8/src/once.cc b/deps/v8/src/once.cc
new file mode 100644
index 0000000000..37fe369fb6
--- /dev/null
+++ b/deps/v8/src/once.cc
@@ -0,0 +1,77 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "once.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include "atomicops.h"
+#include "checks.h"
+
+namespace v8 {
+namespace internal {
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+ AtomicWord state = Acquire_Load(once);
+ // Fast path. The provided function was already executed.
+ if (state == ONCE_STATE_DONE) {
+ return;
+ }
+
+ // The function execution did not complete yet. The once object can be in one
+ // of the two following states:
+ // - UNINITIALIZED: We are the first thread calling this function.
+ // - EXECUTING_FUNCTION: Another thread is already executing the function.
+ //
+ // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
+ // atomically.
+ state = Acquire_CompareAndSwap(
+ once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
+ if (state == ONCE_STATE_UNINITIALIZED) {
+ // We are the first thread to call this function, so we have to call the
+ // function.
+ init_func(arg);
+ Release_Store(once, ONCE_STATE_DONE);
+ } else {
+ // Another thread has already started executing the function. We need to
+ // wait until it completes the initialization.
+ while (state == ONCE_STATE_EXECUTING_FUNCTION) {
+#ifdef _WIN32
+ ::Sleep(0);
+#else
+ sched_yield();
+#endif
+ state = Acquire_Load(once);
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/once.h b/deps/v8/src/once.h
new file mode 100644
index 0000000000..a44b8fafbf
--- /dev/null
+++ b/deps/v8/src/once.h
@@ -0,0 +1,123 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by v8's internal code. Users
+// should not use this directly.
+//
+// This is basically a portable version of pthread_once().
+//
+// This header declares:
+// * A type called OnceType.
+// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
+// OnceType.
+// * A function CallOnce(OnceType* once, void (*init_func)()).
+// This function, when invoked multiple times given the same OnceType object,
+// will invoke init_func on the first call only, and will make sure none of
+// the calls return before that first call to init_func has finished.
+//
+// Additionally, the following features are supported:
+// * A macro V8_ONCE_INIT which is expanded into the expression used to
+// initialize a OnceType. This is only useful when clients embed a OnceType
+// into a structure of their own and want to initialize it statically.
+// * The user can provide a parameter which CallOnce() forwards to the
+// user-provided function when it is called. Usage example:
+//     CallOnce(&my_once, &MyFunctionExpectingIntArgument, &my_int);
+// * This implementation guarantees that OnceType is a POD (i.e. no static
+// initializer generated).
+//
+// This implements a way to perform lazy initialization. It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+// void Init();
+// V8_DECLARE_ONCE(once_init);
+//
+// // Calls Init() exactly once.
+// void InitOnce() {
+// CallOnce(&once_init, &Init);
+// }
+//
+// Note that if CallOnce() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization. In general this is a safe
+// assumption since people don't usually construct threads before main() starts,
+// but it is technically not guaranteed. Unfortunately, Win32 provides no way
+// whatsoever to statically-initialize its synchronization primitives, so our
+// only choice is to assume that dynamic initialization is single-threaded.
+
+#ifndef V8_ONCE_H_
+#define V8_ONCE_H_
+
+#include "atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+typedef AtomicWord OnceType;
+
+#define V8_ONCE_INIT 0
+
+#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
+
+enum {
+ ONCE_STATE_UNINITIALIZED = 0,
+ ONCE_STATE_EXECUTING_FUNCTION = 1,
+ ONCE_STATE_DONE = 2
+};
+
+typedef void (*NoArgFunction)();
+typedef void (*PointerArgFunction)(void* arg);
+
+template <typename T>
+struct OneArgFunction {
+ typedef void (*type)(T);
+};
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+
+inline void CallOnce(OnceType* once, NoArgFunction init_func) {
+ if (Acquire_Load(once) != ONCE_STATE_DONE) {
+ CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+ }
+}
+
+
+template <typename Arg>
+inline void CallOnce(OnceType* once,
+ typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+ if (Acquire_Load(once) != ONCE_STATE_DONE) {
+ CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+ static_cast<void*>(arg));
+ }
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ONCE_H_
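Beyond the no-argument form documented above, the templated overload forwards a pointer to the init function while keeping the same fast path: a single Acquire_Load once initialization is done. A minimal usage sketch (all names hypothetical):

    static int config_value;
    V8_DECLARE_ONCE(init_config_once);

    static void InitConfig(int* out) { *out = 42; }

    void EnsureConfig() {
      // First caller runs InitConfig(&config_value); concurrent callers spin
      // in CallOnceImpl until the store of ONCE_STATE_DONE becomes visible.
      v8::internal::CallOnce(&init_config_once, &InitConfig, &config_value);
    }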
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index ca8cbb9029..da680411a9 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -258,7 +258,7 @@ Handle<String> Parser::LookupSymbol(int symbol_id) {
scanner().literal_ascii_string());
} else {
return isolate()->factory()->LookupTwoByteSymbol(
- scanner().literal_uc16_string());
+ scanner().literal_utf16_string());
}
}
return LookupCachedSymbol(symbol_id);
@@ -279,7 +279,7 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
scanner().literal_ascii_string());
} else {
result = isolate()->factory()->LookupTwoByteSymbol(
- scanner().literal_uc16_string());
+ scanner().literal_utf16_string());
}
symbol_cache_.at(symbol_id) = result;
return result;
@@ -576,12 +576,12 @@ FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
// Notice that the stream is destroyed at the end of the branch block.
// The last line of the blocks can't be moved outside, even though they're
// identical calls.
- ExternalTwoByteStringUC16CharacterStream stream(
+ ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
return DoParseProgram(info, source, &zone_scope);
} else {
- GenericStringUC16CharacterStream stream(source, 0, source->length());
+ GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
return DoParseProgram(info, source, &zone_scope);
}
@@ -604,10 +604,14 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
- if (!info->is_global() &&
- (info->shared_info().is_null() || info->shared_info()->is_function())) {
- scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
- scope = NewScope(scope, EVAL_SCOPE);
+ if (info->is_eval()) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (!info->is_global() && (shared.is_null() || shared->is_function())) {
+ scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
+ }
+ if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
+ scope = NewScope(scope, EVAL_SCOPE);
+ }
}
scope->set_start_position(0);
scope->set_end_position(source->length());
@@ -616,13 +620,13 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
- ParseSourceElements(body, Token::EOS, &ok);
+ ParseSourceElements(body, Token::EOS, info->is_eval(), &ok);
if (ok && !top_scope_->is_classic_mode()) {
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(scope, &ok);
+ CheckConflictingVarDeclarations(top_scope_, &ok);
}
if (ok) {
@@ -665,16 +669,16 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
// Initialize parser state.
source->TryFlatten();
if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
+ ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
return result;
} else {
- GenericStringUC16CharacterStream stream(source,
- shared_info->start_position(),
- shared_info->end_position());
+ GenericStringUtf16CharacterStream stream(source,
+ shared_info->start_position(),
+ shared_info->end_position());
FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
return result;
}
@@ -682,7 +686,7 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
- UC16CharacterStream* source,
+ Utf16CharacterStream* source,
ZoneScope* zone_scope) {
Handle<SharedFunctionInfo> shared_info = info->shared_info();
scanner_.Initialize(source);
@@ -1096,6 +1100,7 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
+ bool is_eval,
bool* ok) {
// SourceElements ::
// (ModuleElement)* <end_token>
@@ -1138,6 +1143,17 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
directive->Equals(isolate()->heap()->use_strict()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict()->length() + 2) {
+      // TODO(mstarzinger): Global strict eval calls need their own scope
+ // as specified in ES5 10.4.2(3). The correct fix would be to always
+ // add this scope in DoParseProgram(), but that requires adaptations
+ // all over the code base, so we go with a quick-fix for now.
+ if (is_eval && !top_scope_->is_eval_scope()) {
+ ASSERT(top_scope_->is_global_scope());
+ Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
+ scope->set_start_position(top_scope_->start_position());
+ scope->set_end_position(top_scope_->end_position());
+ top_scope_ = scope;
+ }
// TODO(ES6): Fix entering extended mode, once it is specified.
top_scope_->SetLanguageMode(FLAG_harmony_scoping
? EXTENDED_MODE : STRICT_MODE);
@@ -4285,7 +4301,7 @@ class SingletonLogger : public ParserRecorder {
// Logs a symbol creation of a literal or identifier.
virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+ virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
@@ -4548,7 +4564,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
factory()->NewThisFunction(),
RelocInfo::kNoPosition)));
}
- ParseSourceElements(body, Token::RBRACE, CHECK_OK);
+ ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -5874,7 +5890,7 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
+static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
int flags,
ParserRecorder* recorder) {
Isolate* isolate = Isolate::Current();
@@ -5915,17 +5931,17 @@ ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
PartialParserRecorder recorder;
int source_length = source->length();
if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
+ ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source_length);
return DoPreParse(&stream, flags, &recorder);
} else {
- GenericStringUC16CharacterStream stream(source, 0, source_length);
+ GenericStringUtf16CharacterStream stream(source, 0, source_length);
return DoPreParse(&stream, flags, &recorder);
}
}
-ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
+ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
v8::Extension* extension,
int flags) {
Handle<Script> no_script;
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 90ef39983e..b4d88255f7 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -172,7 +172,7 @@ class ParserApi {
static bool Parse(CompilationInfo* info, int flags);
// Generic preparser generating full preparse data.
- static ScriptDataImpl* PreParse(UC16CharacterStream* source,
+ static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
v8::Extension* extension,
int flags);
@@ -542,7 +542,7 @@ class Parser {
FunctionLiteral* ParseLazy(CompilationInfo* info,
- UC16CharacterStream* source,
+ Utf16CharacterStream* source,
ZoneScope* zone_scope);
Isolate* isolate() { return isolate_; }
@@ -580,7 +580,7 @@ class Parser {
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
void* ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token, bool* ok);
+ int end_token, bool is_eval, bool* ok);
Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
Module* ParseModule(bool* ok);
@@ -712,7 +712,7 @@ class Parser {
scanner().literal_ascii_string(), tenured);
} else {
return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_uc16_string(), tenured);
+ scanner().literal_utf16_string(), tenured);
}
}
@@ -722,7 +722,7 @@ class Parser {
scanner().next_literal_ascii_string(), tenured);
} else {
return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_uc16_string(), tenured);
+ scanner().next_literal_utf16_string(), tenured);
}
}
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 2dc1ed89db..fa6fce024b 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -628,7 +628,7 @@ class SamplerThread : public Thread {
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
instance_ = new SamplerThread(sampler->interval());
@@ -639,7 +639,7 @@ class SamplerThread : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -725,7 +725,7 @@ class SamplerThread : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SamplerThread* instance_;
private:
@@ -733,7 +733,7 @@ class SamplerThread : public Thread {
};
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
SamplerThread* SamplerThread::instance_ = NULL;
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index f6a426ff44..2a9e174e35 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -723,7 +723,7 @@ class SignalSender : public Thread {
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Install a signal handler.
@@ -743,7 +743,7 @@ class SignalSender : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -836,7 +836,7 @@ class SignalSender : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -845,7 +845,7 @@ class SignalSender : public Thread {
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index cfcbd913fc..08f4495b53 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -388,6 +388,9 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
+ if (FLAG_break_on_abort) {
+ DebugBreak();
+ }
abort();
}
@@ -1090,7 +1093,7 @@ class SignalSender : public Thread {
}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
@@ -1103,7 +1106,7 @@ class SignalSender : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -1206,7 +1209,7 @@ class SignalSender : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -1216,7 +1219,7 @@ class SignalSender : public Thread {
};
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 89abf398c4..bfcaab0b51 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -746,7 +746,7 @@ class SamplerThread : public Thread {
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
instance_ = new SamplerThread(sampler->interval());
@@ -757,7 +757,7 @@ class SamplerThread : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -854,7 +854,7 @@ class SamplerThread : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SamplerThread* instance_;
private:
@@ -864,7 +864,7 @@ class SamplerThread : public Thread {
#undef REGISTER_FIELD
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
SamplerThread* SamplerThread::instance_ = NULL;
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 0d6997190c..b79cb71a50 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -812,7 +812,7 @@ class SignalSender : public Thread {
}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
@@ -825,7 +825,7 @@ class SignalSender : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -919,7 +919,7 @@ class SignalSender : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -929,7 +929,7 @@ class SignalSender : public Thread {
};
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 4543a66e8c..a729b66260 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -127,27 +127,25 @@ double modulo(double x, double y) {
}
-static Mutex* transcendental_function_mutex = OS::CreateMutex();
-
-#define TRANSCENDENTAL_FUNCTION(name, type) \
-static TranscendentalFunction fast_##name##_function = NULL; \
-double fast_##name(double x) { \
- if (fast_##name##_function == NULL) { \
- ScopedLock lock(transcendental_function_mutex); \
- TranscendentalFunction temp = \
- CreateTranscendentalFunction(type); \
- MemoryBarrier(); \
- fast_##name##_function = temp; \
- } \
- return (*fast_##name##_function)(x); \
+#define UNARY_MATH_FUNCTION(name, generator) \
+static UnaryMathFunction fast_##name##_function = NULL; \
+V8_DECLARE_ONCE(fast_##name##_init_once); \
+void init_fast_##name##_function() { \
+ fast_##name##_function = generator; \
+} \
+double fast_##name(double x) { \
+ CallOnce(&fast_##name##_init_once, \
+ &init_fast_##name##_function); \
+ return (*fast_##name##_function)(x); \
}
-TRANSCENDENTAL_FUNCTION(sin, TranscendentalCache::SIN)
-TRANSCENDENTAL_FUNCTION(cos, TranscendentalCache::COS)
-TRANSCENDENTAL_FUNCTION(tan, TranscendentalCache::TAN)
-TRANSCENDENTAL_FUNCTION(log, TranscendentalCache::LOG)
+UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
+UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
+UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
+UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-#undef TRANSCENDENTAL_FUNCTION
+#undef UNARY_MATH_FUNCTION
double OS::nan_value() {
@@ -307,14 +305,14 @@ int OS::VSNPrintF(Vector<char> str,
#if defined(V8_TARGET_ARCH_IA32)
static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
+static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
// Defined in codegen-ia32.cc.
OS::MemCopyFunction CreateMemCopyFunction();
// Copy memory area to disjoint memory area.
void OS::MemCopy(void* dest, const void* src, size_t size) {
if (memcopy_function == NULL) {
- ScopedLock lock(memcopy_function_mutex);
+ ScopedLock lock(memcopy_function_mutex.Pointer());
if (memcopy_function == NULL) {
OS::MemCopyFunction temp = CreateMemCopyFunction();
MemoryBarrier();
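
Note the two initialization idioms now sitting side by side in this file: the math stubs use CallOnce, while the IA-32 MemCopy path keeps classic double-checked locking and only swaps its eagerly created mutex for a LazyMutex. The general shape of that retained pattern, with placeholder names (SomeFunc and CreateSomeFunc are illustrative, not V8 APIs):

    static SomeFunc cached_func = NULL;
    static LazyMutex cached_func_mutex = LAZY_MUTEX_INITIALIZER;

    SomeFunc GetCachedFunc() {
      if (cached_func == NULL) {                  // fast path, no lock taken.
        ScopedLock lock(cached_func_mutex.Pointer());
        if (cached_func == NULL) {                // re-check under the lock.
          SomeFunc temp = CreateSomeFunc();       // build the function first,
          MemoryBarrier();                        // then publish the pointer,
          cached_func = temp;                     // so readers never see a
        }                                         // half-initialized function.
      }
      return cached_func;
    }

The MemoryBarrier() matters: without it, another thread could observe the non-NULL pointer before the stores that created the function.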
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 004a6ed428..50ad353392 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -733,7 +733,7 @@ class SignalSender : public Thread {
}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
@@ -746,7 +746,7 @@ class SignalSender : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -840,7 +840,7 @@ class SignalSender : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -849,7 +849,7 @@ class SignalSender : public Thread {
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 53915c6c0e..2801b711bf 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -149,14 +149,14 @@ static Mutex* limit_mutex = NULL;
#if defined(V8_TARGET_ARCH_IA32)
static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
+static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
// Defined in codegen-ia32.cc.
OS::MemCopyFunction CreateMemCopyFunction();
// Copy memory area to disjoint memory area.
void OS::MemCopy(void* dest, const void* src, size_t size) {
if (memcopy_function == NULL) {
- ScopedLock lock(memcopy_function_mutex);
+ ScopedLock lock(memcopy_function_mutex.Pointer());
if (memcopy_function == NULL) {
OS::MemCopyFunction temp = CreateMemCopyFunction();
MemoryBarrier();
@@ -175,19 +175,16 @@ void OS::MemCopy(void* dest, const void* src, size_t size) {
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
static ModuloFunction modulo_function = NULL;
-static Mutex* modulo_function_mutex = OS::CreateMutex();
+V8_DECLARE_ONCE(modulo_function_init_once);
// Defined in codegen-x64.cc.
ModuloFunction CreateModuloFunction();
+void init_modulo_function() {
+ modulo_function = CreateModuloFunction();
+}
+
double modulo(double x, double y) {
- if (modulo_function == NULL) {
- ScopedLock lock(modulo_function_mutex);
- if (modulo_function == NULL) {
- ModuloFunction temp = CreateModuloFunction();
- MemoryBarrier();
- modulo_function = temp;
- }
- }
+ CallOnce(&modulo_function_init_once, &init_modulo_function);
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
return (*modulo_function)(x, y);
@@ -208,27 +205,25 @@ double modulo(double x, double y) {
#endif // _WIN64
-static Mutex* transcendental_function_mutex = OS::CreateMutex();
-
-#define TRANSCENDENTAL_FUNCTION(name, type) \
-static TranscendentalFunction fast_##name##_function = NULL; \
-double fast_##name(double x) { \
- if (fast_##name##_function == NULL) { \
- ScopedLock lock(transcendental_function_mutex); \
- TranscendentalFunction temp = \
- CreateTranscendentalFunction(type); \
- MemoryBarrier(); \
- fast_##name##_function = temp; \
- } \
- return (*fast_##name##_function)(x); \
+#define UNARY_MATH_FUNCTION(name, generator) \
+static UnaryMathFunction fast_##name##_function = NULL; \
+V8_DECLARE_ONCE(fast_##name##_init_once); \
+void init_fast_##name##_function() { \
+ fast_##name##_function = generator; \
+} \
+double fast_##name(double x) { \
+ CallOnce(&fast_##name##_init_once, \
+ &init_fast_##name##_function); \
+ return (*fast_##name##_function)(x); \
}
-TRANSCENDENTAL_FUNCTION(sin, TranscendentalCache::SIN)
-TRANSCENDENTAL_FUNCTION(cos, TranscendentalCache::COS)
-TRANSCENDENTAL_FUNCTION(tan, TranscendentalCache::TAN)
-TRANSCENDENTAL_FUNCTION(log, TranscendentalCache::LOG)
+UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
+UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
+UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
+UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-#undef TRANSCENDENTAL_FUNCTION
+#undef UNARY_MATH_FUNCTION
// ----------------------------------------------------------------------------
@@ -966,11 +961,11 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- if (!IsDebuggerPresent()) {
+ if (IsDebuggerPresent() || FLAG_break_on_abort) {
+ DebugBreak();
+ } else {
// Make the MSVCRT do a silent abort.
raise(SIGABRT);
- } else {
- DebugBreak();
}
}
@@ -1961,7 +1956,7 @@ class SamplerThread : public Thread {
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
instance_ = new SamplerThread(sampler->interval());
@@ -1972,7 +1967,7 @@ class SamplerThread : public Thread {
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
+ ScopedLock lock(mutex_.Pointer());
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -2058,7 +2053,7 @@ class SamplerThread : public Thread {
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static Mutex* mutex_;
+ static LazyMutex mutex_;
static SamplerThread* instance_;
private:
@@ -2066,7 +2061,7 @@ class SamplerThread : public Thread {
};
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
SamplerThread* SamplerThread::instance_ = NULL;
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index ccb4109b22..4ec6057c5d 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -79,6 +79,7 @@ int random();
#endif // WIN32
#include "atomicops.h"
+#include "lazy-instance.h"
#include "platform-tls.h"
#include "utils.h"
#include "v8globals.h"
@@ -101,6 +102,7 @@ double fast_sin(double input);
double fast_cos(double input);
double fast_tan(double input);
double fast_log(double input);
+double fast_sqrt(double input);
// Forward declarations.
class Socket;
@@ -528,6 +530,24 @@ class Mutex {
virtual bool TryLock() = 0;
};
+struct CreateMutexTrait {
+ static Mutex* Create() {
+ return OS::CreateMutex();
+ }
+};
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// ScopedLock my_lock(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyDynamicInstance<Mutex, CreateMutexTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
// ----------------------------------------------------------------------------
// ScopedLock
@@ -577,6 +597,30 @@ class Semaphore {
virtual void Signal() = 0;
};
+template <int InitialValue>
+struct CreateSemaphoreTrait {
+ static Semaphore* Create() {
+ return OS::CreateSemaphore(InitialValue);
+ }
+};
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// // The following semaphore starts at 0.
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+// void my_function() {
+// // Do something with my_semaphore.Pointer().
+// }
+//
+template <int InitialValue>
+struct LazySemaphore {
+ typedef typename LazyDynamicInstance<
+ Semaphore, CreateSemaphoreTrait<InitialValue> >::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
// ----------------------------------------------------------------------------
// Socket
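
Both LazyMutex and LazySemaphore are plain PODs whose initializer macros expand to constant data, so they can be statically initialized without running any code before main(); the underlying OS object is only created on the first Pointer() call. A simplified sketch of the idea, reusing the one-argument CallOnce from this patch (hypothetical; the real lazy-instance.h added by this commit is more general):

    // Hypothetical simplification of the trait-based lazy instance.
    template <typename T, typename CreateTrait>
    struct LazyInstanceSketch {
      OnceType once_;  // zero-initialized POD; no static constructor needed.
      T* ptr_;

      static void Init(LazyInstanceSketch* self) {
        self->ptr_ = CreateTrait::Create();      // e.g. OS::CreateMutex().
      }
      T* Pointer() {
        CallOnce(&once_, &Init, this);           // first caller creates it.
        return ptr_;
      }
    };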
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index c77a47a10c..f347430208 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -53,7 +53,7 @@ class ParserRecorder {
// Logs a symbol creation of a literal or identifier.
virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+ virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
@@ -149,7 +149,7 @@ class PartialParserRecorder : public FunctionLoggingParserRecorder {
public:
PartialParserRecorder() : FunctionLoggingParserRecorder() { }
virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+ virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
virtual ~PartialParserRecorder() { }
virtual Vector<unsigned> ExtractData();
virtual int symbol_position() { return 0; }
@@ -171,7 +171,7 @@ class CompleteParserRecorder: public FunctionLoggingParserRecorder {
LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
}
- virtual void LogUC16Symbol(int start, Vector<const uc16> literal) {
+ virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
if (!is_recording_) return;
int hash = vector_hash(literal);
LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc
index 1bca9a3333..6e8556aa14 100644
--- a/deps/v8/src/preparser-api.cc
+++ b/deps/v8/src/preparser-api.cc
@@ -46,10 +46,10 @@ namespace v8 {
namespace internal {
// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUTF16Buffer : public UC16CharacterStream {
+class InputStreamUtf16Buffer : public Utf16CharacterStream {
public:
- /* The InputStreamUTF16Buffer maintains an internal buffer
- * that is filled in chunks from the UC16CharacterStream.
+ /* The InputStreamUtf16Buffer maintains an internal buffer
+ * that is filled in chunks from the Utf16CharacterStream.
* It also maintains unlimited pushback capability, but optimized
* for small pushbacks.
* The pushback_buffer_ pointer points to the limit of pushbacks
@@ -60,8 +60,8 @@ class InputStreamUTF16Buffer : public UC16CharacterStream {
* new buffer. When this buffer is read to the end again, the cursor is
* switched back to the internal buffer
*/
- explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
- : UC16CharacterStream(),
+ explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
+ : Utf16CharacterStream(),
stream_(stream),
pushback_buffer_(buffer_),
pushback_buffer_end_cache_(NULL),
@@ -70,7 +70,7 @@ class InputStreamUTF16Buffer : public UC16CharacterStream {
buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
}
- virtual ~InputStreamUTF16Buffer() {
+ virtual ~InputStreamUtf16Buffer() {
if (pushback_buffer_backing_ != NULL) {
DeleteArray(pushback_buffer_backing_);
}
@@ -127,12 +127,18 @@ class InputStreamUTF16Buffer : public UC16CharacterStream {
uc16* buffer_start = buffer_ + kPushBackSize;
buffer_cursor_ = buffer_end_ = buffer_start;
while ((value = stream_->Next()) >= 0) {
- if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
- value = unibrow::Utf8::kBadChar;
+ if (value >
+ static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ buffer_start[buffer_end_++ - buffer_start] =
+ unibrow::Utf16::LeadSurrogate(value);
+ buffer_start[buffer_end_++ - buffer_start] =
+ unibrow::Utf16::TrailSurrogate(value);
+ } else {
+ // buffer_end_ is a const pointer, but buffer_ is writable.
+ buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
}
- // buffer_end_ is a const pointer, but buffer_ is writable.
- buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
- if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
+ // Stop one before the end of the buffer in case we get a surrogate pair.
+      if (buffer_end_ >= buffer_ + kPushBackSize + kBufferSize - 1) break;
}
return buffer_end_ > buffer_start;
}
@@ -179,7 +185,7 @@ UnicodeInputStream::~UnicodeInputStream() { }
PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
- internal::InputStreamUTF16Buffer buffer(input);
+ internal::InputStreamUtf16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
internal::UnicodeCache unicode_cache;
internal::Scanner scanner(&unicode_cache);
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index b36f4faca4..20d3b9c59c 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -1214,7 +1214,7 @@ void PreParser::CheckDuplicate(DuplicateFinder* finder,
old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
type);
} else {
- old_type = finder->AddUC16Symbol(scanner_->literal_uc16_string(), type);
+ old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
}
if (HasConflict(old_type, type)) {
if (IsDataDataConflict(old_type, type)) {
@@ -1387,7 +1387,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
} else {
prev_value =
- duplicate_finder.AddUC16Symbol(scanner_->literal_uc16_string(), 1);
+ duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
}
if (prev_value != 0) {
@@ -1485,7 +1485,7 @@ void PreParser::LogSymbol() {
if (scanner_->is_literal_ascii()) {
log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
} else {
- log_->LogUC16Symbol(identifier_pos, scanner_->literal_uc16_string());
+ log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
}
}
@@ -1657,7 +1657,7 @@ int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
return AddSymbol(i::Vector<const byte>::cast(key), true, value);
}
-int DuplicateFinder::AddUC16Symbol(i::Vector<const uint16_t> key, int value) {
+int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
return AddSymbol(i::Vector<const byte>::cast(key), false, value);
}
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 1455561bbd..f3a43475df 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -65,7 +65,7 @@ class DuplicateFinder {
map_(&Match) { }
int AddAsciiSymbol(i::Vector<const char> key, int value);
- int AddUC16Symbol(i::Vector<const uint16_t> key, int value);
+ int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
// Add a number literal by converting it (if necessary)
// to the string that ToString(ToNumber(literal)) would generate,
// and then adding that string with AddAsciiSymbol.
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index d967ed3897..65369befdf 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -114,15 +114,6 @@ int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
HeapObjectsMap::kObjectIdStep);
}
-
-SnapshotObjectId HeapEntry::id() {
- union {
- Id stored_id;
- SnapshotObjectId returned_id;
- } id_adaptor = {id_};
- return id_adaptor.returned_id;
-}
-
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index ca975652af..2d0984ecbf 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -978,12 +978,7 @@ void HeapEntry::Init(HeapSnapshot* snapshot,
children_count_ = children_count;
retainers_count_ = retainers_count;
dominator_ = NULL;
-
- union {
- SnapshotObjectId set_id;
- Id stored_id;
- } id_adaptor = {id};
- id_ = id_adaptor.stored_id;
+ id_ = id;
}
@@ -1113,7 +1108,7 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 36;
+ static const int kExpectedHeapEntrySize = 32;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};
@@ -1139,10 +1134,10 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
natives_root_entry_(NULL),
raw_entries_(NULL),
entries_sorted_(false) {
- STATIC_ASSERT(
+ STATIC_CHECK(
sizeof(HeapGraphEdge) ==
SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
- STATIC_ASSERT(
+ STATIC_CHECK(
sizeof(HeapEntry) ==
SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index fadae7e28d..d9a1319b87 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -544,7 +544,7 @@ class HeapEntry BASE_EMBEDDED {
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
- inline SnapshotObjectId id();
+ inline SnapshotObjectId id() { return id_; }
int self_size() { return self_size_; }
int retained_size() { return retained_size_; }
void add_retained_size(int size) { retained_size_ += size; }
@@ -608,12 +608,9 @@ class HeapEntry BASE_EMBEDDED {
int ordered_index_; // Used during dominator tree building.
int retained_size_; // At that moment, there is no retained size yet.
};
+ SnapshotObjectId id_;
HeapEntry* dominator_;
HeapSnapshot* snapshot_;
- struct Id {
- uint32_t id1_;
- uint32_t id2_;
- } id_; // This is to avoid extra padding of 64-bit value.
const char* name_;
DISALLOW_COPY_AND_ASSIGN(HeapEntry);
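
The deleted union packed a 64-bit id as two 32-bit words so that HeapEntry would not pick up alignment padding; the id is now a single 32-bit SnapshotObjectId, which is why the expected entry size on 32-bit targets drops from 36 to 32 bytes in the STATIC_CHECKs above. A generic, standalone illustration of the padding effect the old union was dodging (not V8 code; sizes assume a typical 32-bit ABI with 8-byte-aligned uint64_t):

    #include <stdint.h>

    struct Padded {
      uint32_t tag;    // 4 bytes
      uint64_t id;     // wants 8-byte alignment -> 4 padding bytes before it
    };                 // sizeof(Padded) is typically 16.

    struct Split {
      uint32_t tag;    // 4 bytes
      uint32_t id_lo;  // the two-word trick stores the same 64 bits...
      uint32_t id_hi;  // ...without triggering alignment padding.
    };                 // sizeof(Split) is typically 12.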
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index ace0be1564..bc9508d817 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -250,29 +250,32 @@ function RegExpTest(string) {
// Remove irrelevant preceding '.*' in a non-global test regexp.
// The expression checks whether this.source starts with '.*' and
// that the third char is not a '?'.
- if (%_StringCharCodeAt(this.source, 0) == 46 && // '.'
- %_StringCharCodeAt(this.source, 1) == 42 && // '*'
- %_StringCharCodeAt(this.source, 2) != 63) { // '?'
- if (!%_ObjectEquals(regexp_key, this)) {
- regexp_key = this;
- regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
- (!this.ignoreCase
- ? !this.multiline ? "" : "m"
- : !this.multiline ? "i" : "im"));
- }
- if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
- return false;
- }
+ var regexp = this;
+ if (%_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
+ %_StringCharCodeAt(regexp.source, 1) == 42 && // '*'
+ %_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
+ regexp = TrimRegExp(regexp);
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
+ %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
+ var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
if (matchIndices === null) return false;
lastMatchInfoOverride = null;
return true;
}
}
+function TrimRegExp(regexp) {
+ if (!%_ObjectEquals(regexp_key, regexp)) {
+ regexp_key = regexp;
+ regexp_val =
+ new $RegExp(SubString(regexp.source, 2, regexp.source.length),
+ (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
+ : regexp.multiline ? "m" : ""));
+ }
+ return regexp_val;
+}
+
function RegExpToString() {
// If this.source is an empty string, output /(?:)/.
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 70586aa06b..6ed4ff483a 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -72,9 +72,9 @@ static const int kMaxSizeEarlyOpt = 500;
Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+
+// TODO(isolates): Clean up the semaphore when it is no longer required.
+static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
#ifdef DEBUG
bool RuntimeProfiler::has_been_globally_set_up_ = false;
@@ -173,7 +173,9 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// prepared to generate it, but we don't expect to have to.
bool found_code = false;
Code* stack_check_code = NULL;
-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
@@ -268,6 +270,7 @@ void RuntimeProfiler::OptimizeNow() {
// Do not record non-optimizable functions.
if (!function->IsOptimizable()) continue;
+ if (function->shared()->optimization_disabled()) continue;
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
@@ -289,7 +292,12 @@ void RuntimeProfiler::OptimizeNow() {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
+ } else if (ticks >= 100) {
+ // If this function does not have enough type info, but has
+ // seen a huge number of ticks, optimize it as it is.
+ Optimize(function, "not much type info but very hot");
} else {
+ function->shared()->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -343,7 +351,9 @@ void RuntimeProfiler::OptimizeNow() {
void RuntimeProfiler::NotifyTick() {
-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) return;
#endif
isolate_->stack_guard()->RequestRuntimeProfilerTick();
@@ -406,7 +416,7 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
// undid the decrement done by the profiler thread. Increment again
// to get the right count of active isolates.
NoBarrier_AtomicIncrement(&state_, 1);
- semaphore_->Signal();
+ semaphore.Pointer()->Signal();
}
@@ -419,7 +429,7 @@ bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
ASSERT(old_state >= -1);
if (old_state != 0) return false;
- semaphore_->Wait();
+ semaphore.Pointer()->Wait();
return true;
}
@@ -435,7 +445,7 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
if (new_state == 0) {
// The profiler thread is waiting. Wake it up. It must check for
// stop conditions before attempting to wait again.
- semaphore_->Signal();
+ semaphore.Pointer()->Signal();
}
thread->Join();
// The profiler thread is now stopped. Undo the increment in case it
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index f7ca3f020d..e3388492cb 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -136,7 +136,6 @@ class RuntimeProfiler {
// -1 => the profiler thread is waiting on the semaphore
// 0 or positive => the number of isolates running JavaScript code.
static Atomic32 state_;
- static Semaphore* semaphore_;
#ifdef DEBUG
static bool has_been_globally_set_up_;
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index cc5aeab78f..320ab59f3c 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -1065,10 +1065,10 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
AccessorPair::cast(dictionary->ValueAt(entry));
elms->set(IS_ACCESSOR_INDEX, heap->true_value());
if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->SafeGet(ACCESSOR_GETTER));
+ elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
}
if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->SafeGet(ACCESSOR_SETTER));
+ elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
}
break;
}
@@ -1115,10 +1115,10 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->SafeGet(ACCESSOR_GETTER));
+ elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
}
if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->SafeGet(ACCESSOR_SETTER));
+ elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
}
} else {
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -1337,6 +1337,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
attr |= READ_ONLY;
}
+ LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+
// Safari does not allow the invocation of callback setters for
// function declarations. To mimic this behavior, we do not allow
// the invocation of setters for function values. This makes a
@@ -1344,9 +1346,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// handlers such as "function onload() {}". Firefox does call the
// onload setter in those case and Safari does not. We follow
// Safari for compatibility.
- if (value->IsJSFunction()) {
- // Do not change DONT_DELETE to false from true.
+ if (is_function_declaration) {
if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
+ // Do not overwrite READ_ONLY properties.
+ if (lookup.GetAttributes() & READ_ONLY) {
+ if (language_mode != CLASSIC_MODE) {
+ Handle<Object> args[] = { name };
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_cannot_assign", HandleVector(args, ARRAY_SIZE(args))));
+ }
+ continue;
+ }
+ // Do not change DONT_DELETE to false from true.
attr |= lookup.GetAttributes() & DONT_DELETE;
}
PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
@@ -1356,14 +1367,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
attributes));
} else {
- LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
RETURN_IF_EMPTY_HANDLE(
isolate,
JSReceiver::SetProperty(global, name, value,
static_cast<PropertyAttributes>(attr),
- strict_mode_flag));
+ language_mode == CLASSIC_MODE
+ ? kNonStrictMode : kStrictMode));
}
}
@@ -4341,26 +4350,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- // TODO(svenpanne) Define getter/setter/attributes in a single step.
- if (getter->IsNull() && setter->IsNull()) {
- JSArray* array;
- { MaybeObject* maybe_array = GetOwnProperty(isolate, obj, name);
- if (!maybe_array->To(&array)) return maybe_array;
- }
- Object* current = FixedArray::cast(array->elements())->get(GETTER_INDEX);
- getter = Handle<Object>(current, isolate);
- }
- if (!getter->IsNull()) {
- MaybeObject* ok =
- obj->DefineAccessor(*name, ACCESSOR_GETTER, *getter, attr);
- if (ok->IsFailure()) return ok;
- }
- if (!setter->IsNull()) {
- MaybeObject* ok =
- obj->DefineAccessor(*name, ACCESSOR_SETTER, *setter, attr);
- if (ok->IsFailure()) return ok;
- }
-
+ bool fast = obj->HasFastProperties();
+ JSObject::DefineAccessor(obj, name, getter, setter, attr);
+ if (fast) JSObject::TransformToFastProperties(obj, 0);
return isolate->heap()->undefined_value();
}
@@ -6760,6 +6752,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
ascii = false;
}
} else {
+ ASSERT(!elt->IsTheHole());
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
if (increment > String::kMaxLength - position) {
@@ -7441,9 +7434,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
if (y == y_int) {
result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
} else if (y == 0.5) {
- result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0); // Convert -0 to +0.
+ result = (isinf(x)) ? V8_INFINITY
+ : fast_sqrt(x + 0.0); // Convert -0 to +0.
} else if (y == -0.5) {
- result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0); // Convert -0 to +0.
+ result = (isinf(x)) ? 0
+ : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
} else {
result = power_double_double(x, y);
}
@@ -7529,7 +7524,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
isolate->counters()->math_sqrt()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(sqrt(x));
+ return isolate->heap()->AllocateHeapNumber(fast_sqrt(x));
}
@@ -8112,6 +8107,27 @@ class ActivationsFinder : public ThreadVisitor {
};
+static void MaterializeArgumentsObjectInFrame(Isolate* isolate,
+ JavaScriptFrame* frame) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
+ Handle<Object> arguments;
+ for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
+ if (frame->GetExpression(i) == isolate->heap()->arguments_marker()) {
+ if (arguments.is_null()) {
+ // FunctionGetArguments can't throw an exception, so cast away the
+ // doubt with an assert.
+ arguments = Handle<Object>(
+ Accessors::FunctionGetArguments(*function,
+ NULL)->ToObjectUnchecked());
+ ASSERT(*arguments != isolate->heap()->null_value());
+ ASSERT(*arguments != isolate->heap()->undefined_value());
+ }
+ frame->SetExpression(i, *arguments);
+ }
+ }
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8126,27 +8142,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
delete deoptimizer;
JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = NULL;
- for (int i = 0; i < jsframes - 1; i++) it.Advance();
- frame = it.frame();
+ for (int i = 0; i < jsframes - 1; i++) {
+ MaterializeArgumentsObjectInFrame(isolate, it.frame());
+ it.Advance();
+ }
+ JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- Handle<Object> arguments;
- for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == isolate->heap()->arguments_marker()) {
- if (arguments.is_null()) {
- // FunctionGetArguments can't throw an exception, so cast away the
- // doubt with an assert.
- arguments = Handle<Object>(
- Accessors::FunctionGetArguments(*function,
- NULL)->ToObjectUnchecked());
- ASSERT(*arguments != isolate->heap()->null_value());
- ASSERT(*arguments != isolate->heap()->undefined_value());
- }
- frame->SetExpression(i, *arguments);
- }
- }
+ MaterializeArgumentsObjectInFrame(isolate, frame);
if (type == Deoptimizer::EAGER) {
RUNTIME_ASSERT(function->IsOptimized());
@@ -8354,7 +8358,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
PrintF("]\n");
}
Handle<Code> check_code;
-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
check_code = interrupt_stub.GetCode();
@@ -10202,8 +10208,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
if (hasJavaScriptAccessors) {
AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
details->set(2, isolate->heap()->ToBoolean(caught_exception));
- details->set(3, accessors->SafeGet(ACCESSOR_GETTER));
- details->set(4, accessors->SafeGet(ACCESSOR_SETTER));
+ details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
+ details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
}
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -12257,6 +12263,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
}
+// Patches script source (should be called upon BeforeCompile event).
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
+ Handle<String> source(String::cast(args[1]));
+
+ RUNTIME_ASSERT(script_wrapper->value()->IsScript());
+ Handle<Script> script(Script::cast(script_wrapper->value()));
+
+ int compilation_state = Smi::cast(script->compilation_state())->value();
+ RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
+ script->set_source(*source);
+
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
ASSERT(args.length() == 0);
CPU::DebugBreak();
@@ -13313,6 +13338,7 @@ void Runtime::PerformGC(Object* result) {
if (isolate->heap()->new_space()->AddFreshPage()) {
return;
}
+
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
isolate->heap()->CollectGarbage(failure->allocation_space(),
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index c5ce3c3880..fe9cfd9b2f 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -418,6 +418,7 @@ namespace internal {
F(DebugReferencedBy, 3, 1) \
F(DebugConstructedBy, 2, 1) \
F(DebugGetPrototype, 1, 1) \
+ F(DebugSetScriptSource, 2, 1) \
F(SystemBreak, 0, 1) \
F(DebugDisassembleFunction, 1, 1) \
F(DebugDisassembleConstructor, 1, 1) \
@@ -485,12 +486,13 @@ namespace internal {
F(IsNonNegativeSmi, 1, 1) \
F(IsArray, 1, 1) \
F(IsRegExp, 1, 1) \
+ F(IsConstructCall, 0, 1) \
F(CallFunction, -1 /* receiver + n args + function */, 1) \
F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1) \
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
- F(DateField, 2 /* date object, field index */, 1) \
+ F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
F(ObjectEquals, 2, 1) \
@@ -518,7 +520,6 @@ namespace internal {
// a corresponding runtime function, that is called for slow cases.
// Entries have the form F(name, number of arguments, number of return values).
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
- F(IsConstructCall, 0, 1) \
F(ClassOf, 1, 1) \
F(StringCharCodeAt, 2, 1) \
F(Log, 3, 1) \
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index ee10703c9e..56b9f03aa5 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -36,19 +36,19 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// BufferedUC16CharacterStreams
+// BufferedUtf16CharacterStreams
-BufferedUC16CharacterStream::BufferedUC16CharacterStream()
- : UC16CharacterStream(),
+BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
+ : Utf16CharacterStream(),
pushback_limit_(NULL) {
// Initialize buffer as being empty. First read will fill the buffer.
buffer_cursor_ = buffer_;
buffer_end_ = buffer_;
}
-BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
+BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
-void BufferedUC16CharacterStream::PushBack(uc32 character) {
+void BufferedUtf16CharacterStream::PushBack(uc32 character) {
if (character == kEndOfInput) {
pos_--;
return;
@@ -63,7 +63,7 @@ void BufferedUC16CharacterStream::PushBack(uc32 character) {
}
-void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
+void BufferedUtf16CharacterStream::SlowPushBack(uc16 character) {
// In pushback mode, the end of the buffer contains pushback,
// and the start of the buffer (from buffer start to pushback_limit_)
// contains valid data that comes just after the pushback.
@@ -89,7 +89,7 @@ void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
}
-bool BufferedUC16CharacterStream::ReadBlock() {
+bool BufferedUtf16CharacterStream::ReadBlock() {
buffer_cursor_ = buffer_;
if (pushback_limit_ != NULL) {
// Leave pushback mode.
@@ -106,7 +106,7 @@ bool BufferedUC16CharacterStream::ReadBlock() {
}
-unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
+unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
// Leave pushback mode (i.e., ignore that there might be valid data
// in the buffer before the pushback_limit_ point).
pushback_limit_ = NULL;
@@ -114,10 +114,10 @@ unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
}
// ----------------------------------------------------------------------------
-// GenericStringUC16CharacterStream
+// GenericStringUtf16CharacterStream
-GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
+GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
Handle<String> data,
unsigned start_position,
unsigned end_position)
@@ -130,10 +130,10 @@ GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
}
-GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
+GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
-unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
+unsigned GenericStringUtf16CharacterStream::BufferSeekForward(unsigned delta) {
unsigned old_pos = pos_;
pos_ = Min(pos_ + delta, length_);
ReadBlock();
@@ -141,7 +141,7 @@ unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
}
-unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
+unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos,
unsigned length) {
if (from_pos >= length_) return 0;
if (from_pos + length > length_) {
@@ -153,10 +153,10 @@ unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
// ----------------------------------------------------------------------------
-// Utf8ToUC16CharacterStream
-Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
- unsigned length)
- : BufferedUC16CharacterStream(),
+// Utf8ToUtf16CharacterStream
+Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
+ unsigned length)
+ : BufferedUtf16CharacterStream(),
raw_data_(data),
raw_data_length_(length),
raw_data_pos_(0),
@@ -165,10 +165,10 @@ Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
}
-Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
+Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
-unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
+unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
unsigned old_pos = pos_;
unsigned target_pos = pos_ + delta;
SetRawPosition(target_pos);
@@ -178,9 +178,9 @@ unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
}
-unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
- unsigned length) {
- static const unibrow::uchar kMaxUC16Character = 0xffff;
+unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
+ unsigned length) {
+ static const unibrow::uchar kMaxUtf16Character = 0xffff;
SetRawPosition(char_position);
if (raw_character_position_ != char_position) {
// char_position was not a valid position in the stream (hit the end
@@ -188,7 +188,7 @@ unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
return 0u;
}
unsigned i = 0;
- while (i < length) {
+ while (i < length - 1) {
if (raw_data_pos_ == raw_data_length_) break;
unibrow::uchar c = raw_data_[raw_data_pos_];
if (c <= unibrow::Utf8::kMaxOneByteChar) {
@@ -197,12 +197,13 @@ unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
raw_data_length_ - raw_data_pos_,
&raw_data_pos_);
- // Don't allow characters outside of the BMP.
- if (c > kMaxUC16Character) {
- c = unibrow::Utf8::kBadChar;
- }
}
- buffer_[i++] = static_cast<uc16>(c);
+ if (c > kMaxUtf16Character) {
+ buffer_[i++] = unibrow::Utf16::LeadSurrogate(c);
+ buffer_[i++] = unibrow::Utf16::TrailSurrogate(c);
+ } else {
+ buffer_[i++] = static_cast<uc16>(c);
+ }
}
raw_character_position_ = char_position + i;
return i;
@@ -266,37 +267,52 @@ static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
}
-void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
+// This can't set a raw position between the two halves of a surrogate pair,
+// since there is no position in the UTF-8 stream that corresponds to that.
+// This assumes the surrogate pair is correctly coded as a 4 byte UTF-8
+// sequence; if it is illegally coded as two 3 byte sequences there is no
+// problem here.
+void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
if (raw_character_position_ > target_position) {
// Spool backwards in utf8 buffer.
do {
+ int old_pos = raw_data_pos_;
Utf8CharacterBack(raw_data_, &raw_data_pos_);
raw_character_position_--;
+ ASSERT(old_pos - raw_data_pos_ <= 4);
+ // Step back over both code units for surrogate pairs.
+ if (old_pos - raw_data_pos_ == 4) raw_character_position_--;
} while (raw_character_position_ > target_position);
+ // No surrogate pair splitting.
+ ASSERT(raw_character_position_ == target_position);
return;
}
// Spool forwards in the utf8 buffer.
while (raw_character_position_ < target_position) {
if (raw_data_pos_ == raw_data_length_) return;
+ int old_pos = raw_data_pos_;
Utf8CharacterForward(raw_data_, &raw_data_pos_);
raw_character_position_++;
+ ASSERT(raw_data_pos_ - old_pos <= 4);
+ if (raw_data_pos_ - old_pos == 4) raw_character_position_++;
}
+ // No surrogate pair splitting.
+ ASSERT(raw_character_position_ == target_position);
}
// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUC16CharacterStream
+// ExternalTwoByteStringUtf16CharacterStream
-ExternalTwoByteStringUC16CharacterStream::
- ~ExternalTwoByteStringUC16CharacterStream() { }
+ExternalTwoByteStringUtf16CharacterStream::
+ ~ExternalTwoByteStringUtf16CharacterStream() { }
-ExternalTwoByteStringUC16CharacterStream
- ::ExternalTwoByteStringUC16CharacterStream(
+ExternalTwoByteStringUtf16CharacterStream
+ ::ExternalTwoByteStringUtf16CharacterStream(
Handle<ExternalTwoByteString> data,
int start_position,
int end_position)
- : UC16CharacterStream(),
+ : Utf16CharacterStream(),
source_(data),
raw_data_(data->GetTwoByteData(start_position)) {
buffer_cursor_ = raw_data_,
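
The FillBuffer change above is the substance of this patch: characters outside the BMP are now emitted as a surrogate pair instead of being replaced with kBadChar, and the loop bound became length - 1 so there is always room for both code units of a pair. The arithmetic it relies on is standard UTF-16; a minimal standalone sketch (illustrative names, not V8's unibrow API):

    #include <cassert>
    #include <cstdint>

    // Standard UTF-16 surrogate math, as used by the new FillBuffer path.
    static uint16_t LeadSurrogate(uint32_t c) {
      return 0xd800 + (((c - 0x10000) >> 10) & 0x3ff);
    }
    static uint16_t TrailSurrogate(uint32_t c) {
      return 0xdc00 + (c & 0x3ff);
    }
    static uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
      return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
    }

    int main() {
      uint32_t c = 0x1d11e;                // MUSICAL SYMBOL G CLEF, outside the BMP.
      uint16_t lead = LeadSurrogate(c);    // 0xd834
      uint16_t trail = TrailSurrogate(c);  // 0xdd1e
      assert(CombineSurrogatePair(lead, trail) == c);  // Round-trips exactly.
      return 0;
    }
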
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/scanner-character-streams.h
index 5c4ea2ca36..319ee8fc1c 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/scanner-character-streams.h
@@ -36,10 +36,10 @@ namespace internal {
// A buffered character stream based on a random access character
// source (ReadBlock can be called with pos_ pointing to any position,
// even positions before the current).
-class BufferedUC16CharacterStream: public UC16CharacterStream {
+class BufferedUtf16CharacterStream: public Utf16CharacterStream {
public:
- BufferedUC16CharacterStream();
- virtual ~BufferedUC16CharacterStream();
+ BufferedUtf16CharacterStream();
+ virtual ~BufferedUtf16CharacterStream();
virtual void PushBack(uc32 character);
@@ -60,12 +60,12 @@ class BufferedUC16CharacterStream: public UC16CharacterStream {
// Generic string stream.
-class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
+class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
- GenericStringUC16CharacterStream(Handle<String> data,
- unsigned start_position,
- unsigned end_position);
- virtual ~GenericStringUC16CharacterStream();
+ GenericStringUtf16CharacterStream(Handle<String> data,
+ unsigned start_position,
+ unsigned end_position);
+ virtual ~GenericStringUtf16CharacterStream();
protected:
virtual unsigned BufferSeekForward(unsigned delta);
@@ -77,11 +77,11 @@ class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
};
-// UC16 stream based on a literal UTF-8 string.
-class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
+// Utf16 stream based on a literal UTF-8 string.
+class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
- Utf8ToUC16CharacterStream(const byte* data, unsigned length);
- virtual ~Utf8ToUC16CharacterStream();
+ Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
+ virtual ~Utf8ToUtf16CharacterStream();
protected:
virtual unsigned BufferSeekForward(unsigned delta);
@@ -98,12 +98,12 @@ class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
// UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
+class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
public:
- ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
- int start_position,
- int end_position);
- virtual ~ExternalTwoByteStringUC16CharacterStream();
+ ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
+ int start_position,
+ int end_position);
+ virtual ~ExternalTwoByteStringUtf16CharacterStream();
virtual void PushBack(uc32 character) {
ASSERT(buffer_cursor_ > raw_data_);
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 72768b381b..7901b5d826 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -45,7 +45,7 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
harmony_modules_(false) { }
-void Scanner::Initialize(UC16CharacterStream* source) {
+void Scanner::Initialize(Utf16CharacterStream* source) {
source_ = source;
// Need to capture identifiers in order to recognize "get" and "set"
// in object literals.
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index e892fe0c1f..045e7d27a6 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -73,15 +73,17 @@ inline int HexValue(uc32 c) {
// ---------------------------------------------------------------------
-// Buffered stream of characters, using an internal UC16 buffer.
+// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
+// A code unit is a 16 bit value representing either a 16 bit code point
+// or one part of a surrogate pair that makes up a single 21 bit code point.
-class UC16CharacterStream {
+class Utf16CharacterStream {
public:
- UC16CharacterStream() : pos_(0) { }
- virtual ~UC16CharacterStream() { }
+ Utf16CharacterStream() : pos_(0) { }
+ virtual ~Utf16CharacterStream() { }
- // Returns and advances past the next UC16 character in the input
- // stream. If there are no more characters, it returns a negative
+ // Returns and advances past the next UTF-16 code unit in the input
+ // stream. If there are no more code units, it returns a negative
// value.
inline uc32 Advance() {
if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
@@ -90,47 +92,47 @@ class UC16CharacterStream {
}
// Note: currently the following increment is necessary to avoid a
// parser problem! The scanner treats the final kEndOfInput as
- // a character with a position, and does math relative to that
+ // a code unit with a position, and does math relative to that
// position.
pos_++;
return kEndOfInput;
}
- // Return the current position in the character stream.
+ // Return the current position in the code unit stream.
// Starts at zero.
inline unsigned pos() const { return pos_; }
- // Skips forward past the next character_count UC16 characters
+ // Skips forward past the next code_unit_count UTF-16 code units
// in the input, or until the end of input if that comes sooner.
- // Returns the number of characters actually skipped. If less
- // than character_count,
- inline unsigned SeekForward(unsigned character_count) {
+ // Returns the number of code units actually skipped. If less
+ // than code_unit_count, the end of the input was reached.
+ inline unsigned SeekForward(unsigned code_unit_count) {
unsigned buffered_chars =
static_cast<unsigned>(buffer_end_ - buffer_cursor_);
- if (character_count <= buffered_chars) {
- buffer_cursor_ += character_count;
- pos_ += character_count;
- return character_count;
+ if (code_unit_count <= buffered_chars) {
+ buffer_cursor_ += code_unit_count;
+ pos_ += code_unit_count;
+ return code_unit_count;
}
- return SlowSeekForward(character_count);
+ return SlowSeekForward(code_unit_count);
}
- // Pushes back the most recently read UC16 character (or negative
+ // Pushes back the most recently read UTF-16 code unit (or negative
// value if at end of input), i.e., the value returned by the most recent
// call to Advance.
// Must not be used right after calling SeekForward.
- virtual void PushBack(int32_t character) = 0;
+ virtual void PushBack(int32_t code_unit) = 0;
protected:
static const uc32 kEndOfInput = -1;
- // Ensures that the buffer_cursor_ points to the character at
+ // Ensures that the buffer_cursor_ points to the code_unit at
// position pos_ of the input, if possible. If the position
// is at or after the end of the input, return false. If there
- // are more characters available, return true.
+ // are more code_units available, return true.
virtual bool ReadBlock() = 0;
- virtual unsigned SlowSeekForward(unsigned character_count) = 0;
+ virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
const uc16* buffer_cursor_;
const uc16* buffer_end_;
@@ -178,23 +180,24 @@ class LiteralBuffer {
}
}
- INLINE(void AddChar(uc16 character)) {
+ INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
if (is_ascii_) {
- if (character < kMaxAsciiCharCodeU) {
- backing_store_[position_] = static_cast<byte>(character);
+ if (code_unit < kMaxAsciiCharCodeU) {
+ backing_store_[position_] = static_cast<byte>(code_unit);
position_ += kASCIISize;
return;
}
- ConvertToUC16();
+ ConvertToUtf16();
}
- *reinterpret_cast<uc16*>(&backing_store_[position_]) = character;
+ ASSERT(code_unit < 0x10000u);
+ *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
position_ += kUC16Size;
}
bool is_ascii() { return is_ascii_; }
- Vector<const uc16> uc16_literal() {
+ Vector<const uc16> utf16_literal() {
ASSERT(!is_ascii_);
ASSERT((position_ & 0x1) == 0);
return Vector<const uc16>(
@@ -236,13 +239,13 @@ class LiteralBuffer {
backing_store_ = new_store;
}
- void ConvertToUC16() {
+ void ConvertToUtf16() {
ASSERT(is_ascii_);
Vector<byte> new_store;
int new_content_size = position_ * kUC16Size;
if (new_content_size >= backing_store_.length()) {
- // Ensure room for all currently read characters as UC16 as well
- // as the character about to be stored.
+ // Ensure room for all currently read code units as UC16 as well
+ // as the code unit about to be stored.
new_store = Vector<byte>::New(NewCapacity(new_content_size));
} else {
new_store = backing_store_;
@@ -316,7 +319,7 @@ class Scanner {
explicit Scanner(UnicodeCache* scanner_contants);
- void Initialize(UC16CharacterStream* source);
+ void Initialize(Utf16CharacterStream* source);
// Returns the next token and advances input.
Token::Value Next();
@@ -335,9 +338,9 @@ class Scanner {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->ascii_literal();
}
- Vector<const uc16> literal_uc16_string() {
+ Vector<const uc16> literal_utf16_string() {
ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->uc16_literal();
+ return current_.literal_chars->utf16_literal();
}
bool is_literal_ascii() {
ASSERT_NOT_NULL(current_.literal_chars);
@@ -371,9 +374,9 @@ class Scanner {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->ascii_literal();
}
- Vector<const uc16> next_literal_uc16_string() {
+ Vector<const uc16> next_literal_utf16_string() {
ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->uc16_literal();
+ return next_.literal_chars->utf16_literal();
}
bool is_next_literal_ascii() {
ASSERT_NOT_NULL(next_.literal_chars);
@@ -542,8 +545,8 @@ class Scanner {
TokenDesc current_; // desc for current token (as returned by Next())
TokenDesc next_; // desc for next token (one token look-ahead)
- // Input stream. Must be initialized to an UC16CharacterStream.
- UC16CharacterStream* source_;
+ // Input stream. Must be initialized to a Utf16CharacterStream.
+ Utf16CharacterStream* source_;
// Start position of the octal literal last scanned.
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 4249d369d5..01d5f1c7bc 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -849,13 +849,12 @@ void Deserializer::ReadChunk(Object** current,
if (how == kFromCode) { \
Address location_of_branch_data = \
reinterpret_cast<Address>(current); \
- Assembler::set_target_at(location_of_branch_data, \
- reinterpret_cast<Address>(new_object)); \
- if (within == kFirstInstruction) { \
- location_of_branch_data += Assembler::kCallTargetSize; \
- current = reinterpret_cast<Object**>(location_of_branch_data); \
- current_was_incremented = true; \
- } \
+ Assembler::deserialization_set_special_target_at( \
+ location_of_branch_data, \
+ reinterpret_cast<Address>(new_object)); \
+ location_of_branch_data += Assembler::kSpecialTargetSize; \
+ current = reinterpret_cast<Object**>(location_of_branch_data); \
+ current_was_incremented = true; \
} else { \
*current = new_object; \
} \
@@ -991,6 +990,21 @@ void Deserializer::ReadChunk(Object** current,
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
+#if V8_TARGET_ARCH_MIPS
+ // Deserialize a new object from pointer found in code and write
+ // a pointer to it to the current object. Required only for MIPS, and
+ // omitted on the other architectures because it is fully unrolled and
+ // would cause bloat.
+ ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
+ // Find a recently deserialized code object using its offset from the
+ // current allocation point and write a pointer to it to the current
+ // object. Required only for MIPS.
+ ALL_SPACES(kBackref, kFromCode, kStartOfObject)
+ // Find an already deserialized code object using its offset from
+ // the start and write a pointer to it to the current object.
+ // Required only for MIPS.
+ ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
+#endif
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to its first instruction
// to the current code object or the instruction pointer in a function
@@ -1229,12 +1243,23 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
}
-int Serializer::RootIndex(HeapObject* heap_object) {
+int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
Heap* heap = HEAP;
if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
- if (!root->IsSmi() && root == heap_object) return i;
+ if (!root->IsSmi() && root == heap_object) {
+#if V8_TARGET_ARCH_MIPS
+ if (from == kFromCode) {
+ // In order to avoid code bloat in the deserializer we don't have
+ // support for the encoding that specifies a particular root should
+ // be written into the lui/ori instructions on MIPS. Therefore we
+ // should not generate such serialization data for MIPS.
+ return kInvalidRootIndex;
+ }
+#endif
+ return i;
+ }
}
return kInvalidRootIndex;
}
@@ -1287,7 +1312,7 @@ void StartupSerializer::SerializeObject(
HeapObject* heap_object = HeapObject::cast(o);
int root_index;
- if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
return;
}
@@ -1359,7 +1384,7 @@ void PartialSerializer::SerializeObject(
}
int root_index;
- if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
return;
}
@@ -1439,7 +1464,7 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
while (current < end && !(*current)->IsSmi()) {
HeapObject* current_contents = HeapObject::cast(*current);
- int root_index = serializer_->RootIndex(current_contents);
+ int root_index = serializer_->RootIndex(current_contents, kPlain);
// Repeats are not subject to the write barrier so there are only some
// objects that can be used in a repeat encoding. These are the early
// ones in the root array that are never in new space.
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 02bf58a9ee..f50e23eac8 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -485,7 +485,7 @@ class Serializer : public SerializerDeserializer {
protected:
static const int kInvalidRootIndex = -1;
- int RootIndex(HeapObject* heap_object);
+ int RootIndex(HeapObject* heap_object, HowToCode from);
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
intptr_t root_index_wave_front() { return root_index_wave_front_; }
void set_root_index_wave_front(intptr_t value) {
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index d7061a1a79..defe352614 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -594,6 +594,9 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
}
+ isolate_->heap()->RememberUnmappedPage(
+ reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
+
delete chunk->slots_buffer();
delete chunk->skip_list();
@@ -2522,6 +2525,10 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
+static bool ComparePointers(void* key1, void* key2) {
+ return key1 == key2;
+}
+
LargeObjectSpace::LargeObjectSpace(Heap* heap,
intptr_t max_capacity,
@@ -2531,7 +2538,8 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap,
first_page_(NULL),
size_(0),
page_count_(0),
- objects_size_(0) {}
+ objects_size_(0),
+ chunk_map_(ComparePointers, 1024) {}
bool LargeObjectSpace::SetUp() {
@@ -2539,6 +2547,7 @@ bool LargeObjectSpace::SetUp() {
size_ = 0;
page_count_ = 0;
objects_size_ = 0;
+ chunk_map_.Clear();
return true;
}
@@ -2582,6 +2591,18 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
page->set_next_page(first_page_);
first_page_ = page;
+ // Register all MemoryChunk::kAlignment-aligned chunks covered by
+ // this large page in the chunk map.
+ uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+ uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
+ for (uintptr_t key = base; key <= limit; key++) {
+ HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+ static_cast<uint32_t>(key),
+ true);
+ ASSERT(entry != NULL);
+ entry->value = page;
+ }
+
HeapObject* object = page->GetObject();
#ifdef DEBUG
@@ -2598,27 +2619,25 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
- for (LargePage* page = first_page_;
- page != NULL;
- page = page->next_page()) {
- Address page_address = page->address();
- if (page_address <= a && a < page_address + page->size()) {
- return page->GetObject();
- }
+ LargePage* page = FindPage(a);
+ if (page != NULL) {
+ return page->GetObject();
}
return Failure::Exception();
}
-LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
- // TODO(853): Change this implementation to only find executable
- // chunks and use some kind of hash-based approach to speed it up.
- for (LargePage* chunk = first_page_;
- chunk != NULL;
- chunk = chunk->next_page()) {
- Address chunk_address = chunk->address();
- if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
- return chunk;
+LargePage* LargeObjectSpace::FindPage(Address a) {
+ uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
+ HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+ static_cast<uint32_t>(key),
+ false);
+ if (e != NULL) {
+ ASSERT(e->value != NULL);
+ LargePage* page = reinterpret_cast<LargePage*>(e->value);
+ ASSERT(page->is_valid());
+ if (page->Contains(a)) {
+ return page;
}
}
return NULL;
@@ -2656,6 +2675,17 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
objects_size_ -= object->Size();
page_count_--;
+ // Remove entries belonging to this page.
+ // The local variable |alignment| keeps the lines below within the
+ // 80 character single-line length check in tools/presubmit.py.
+ const intptr_t alignment = MemoryChunk::kAlignment;
+ uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
+ uintptr_t limit = base + (page->size()-1)/alignment;
+ for (uintptr_t key = base; key <= limit; key++) {
+ chunk_map_.Remove(reinterpret_cast<void*>(key),
+ static_cast<uint32_t>(key));
+ }
+
if (is_pointer_object) {
heap()->QueueMemoryChunkForFree(page);
} else {
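
The three spaces.cc hunks above implement an O(1) replacement for the old linear page walk: AllocateRaw registers every MemoryChunk::kAlignment-aligned slot covered by a new large page in chunk_map_, FreeUnmarkedObjects removes those entries when the page dies, and FindPage is reduced to a single hash probe plus a Contains() check. A standalone sketch of the keying scheme (std::unordered_map stands in for V8's HashMap, and the alignment value is illustrative):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    static const uintptr_t kAlignment = 1 << 20;  // stand-in for MemoryChunk::kAlignment

    struct LargePage { uintptr_t start; uintptr_t size; };

    int main() {
      std::unordered_map<uintptr_t, LargePage*> chunk_map;
      LargePage page = { 5 * kAlignment + 4096, 3 * kAlignment };

      // Registration, as in AllocateRaw: key every aligned slot the page covers.
      uintptr_t base = page.start / kAlignment;
      uintptr_t limit = base + (page.size - 1) / kAlignment;
      for (uintptr_t key = base; key <= limit; key++) chunk_map[key] = &page;

      // Lookup, as in FindPage: hash the aligned slot, then confirm containment.
      uintptr_t a = page.start + page.size / 2;
      auto it = chunk_map.find(a / kAlignment);
      assert(it != chunk_map.end() && it->second == &page);
      return 0;
    }
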
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 75ca53444a..b614c3bd65 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -29,6 +29,7 @@
#define V8_SPACES_H_
#include "allocation.h"
+#include "hashmap.h"
#include "list.h"
#include "log.h"
@@ -2499,9 +2500,9 @@ class LargeObjectSpace : public Space {
// space, may be slow.
MaybeObject* FindObject(Address a);
- // Finds a large object page containing the given pc, returns NULL
+ // Finds a large object page containing the given address, returns NULL
// if such a page doesn't exist.
- LargePage* FindPageContainingPc(Address pc);
+ LargePage* FindPage(Address a);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2536,6 +2537,8 @@ class LargeObjectSpace : public Space {
intptr_t size_; // allocated bytes
int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
+ // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
+ HashMap chunk_map_;
friend class LargeObjectIterator;
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 02f5c98cd3..84dde3dc27 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -554,14 +554,14 @@ function StringSlice(start, end) {
}
} else {
if (start_i > s_len) {
- start_i = s_len;
+ return '';
}
}
if (end_i < 0) {
end_i += s_len;
if (end_i < 0) {
- end_i = 0;
+ return '';
}
} else {
if (end_i > s_len) {
@@ -569,12 +569,11 @@ function StringSlice(start, end) {
}
}
- var num_c = end_i - start_i;
- if (num_c < 0) {
- num_c = 0;
+ if (end_i <= start_i) {
+ return '';
}
- return SubString(s, start_i, start_i + num_c);
+ return SubString(s, start_i, end_i);
}
@@ -611,6 +610,12 @@ function StringSplit(separator, limit) {
if (limit === 0) return [];
+ // Separator is a regular expression.
+ return StringSplitOnRegExp(subject, separator, limit, length);
+}
+
+
+function StringSplitOnRegExp(subject, separator, limit, length) {
%_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
if (length === 0) {
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index fa479b2e51..159be6a5a1 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -77,6 +77,17 @@ Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
}
+bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
+ Handle<Object> map_or_code = GetInfo(expr->id());
+ if (map_or_code->IsMap()) return false;
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->is_inline_cache_stub() && code->ic_state() == UNINITIALIZED;
+ }
+ return false;
+}
+
+
bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsMap()) return true;
@@ -154,6 +165,13 @@ bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
}
+bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
+ ObjectLiteral::Property* prop) {
+ Handle<Object> map_or_code = GetInfo(prop->key()->id());
+ return map_or_code->IsMap();
+}
+
+
bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
Handle<Object> value = GetInfo(stmt->PrepareId());
return value->IsSmi() &&
@@ -268,6 +286,13 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
}
+Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
+ ObjectLiteral::Property* prop) {
+ ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
+ return Handle<Map>::cast(GetInfo(prop->key()->id()));
+}
+
+
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
return *GetInfo(expr->id()) ==
isolate_->builtins()->builtin(id);
@@ -368,6 +393,10 @@ TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
case BinaryOpIC::SMI:
switch (result_type) {
case BinaryOpIC::UNINITIALIZED:
+ if (expr->op() == Token::DIV) {
+ return TypeInfo::Double();
+ }
+ return TypeInfo::Smi();
case BinaryOpIC::SMI:
return TypeInfo::Smi();
case BinaryOpIC::INT32:
@@ -631,7 +660,7 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
SetInfo(ast_id, map);
}
}
- } else if (target->ic_state() == MEGAMORPHIC) {
+ } else {
SetInfo(ast_id, target);
}
break;
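
The new UNINITIALIZED case in BinaryType encodes a simple heuristic: with no type feedback yet, a division is assumed to produce a Double rather than a Smi, because even two small-integer operands routinely yield a fractional quotient, and typing the result Smi would make a deoptimization on the first such result near-certain. The arithmetic in miniature:

    #include <cassert>

    int main() {
      int a = 3, b = 2;
      double q = static_cast<double>(a) / b;
      assert(q == 1.5);        // The exact quotient is not a small integer.
      assert(a / b * b != a);  // Integer division would lose information.
      return 0;
    }
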
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 84ec51d975..d461331bec 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -29,6 +29,7 @@
#define V8_TYPE_INFO_H_
#include "allocation.h"
+#include "ast.h"
#include "globals.h"
#include "zone-inl.h"
@@ -238,11 +239,13 @@ class TypeFeedbackOracle BASE_EMBEDDED {
Isolate* isolate);
bool LoadIsMonomorphicNormal(Property* expr);
+ bool LoadIsUninitialized(Property* expr);
bool LoadIsMegamorphicWithTypeInfo(Property* expr);
bool StoreIsMonomorphicNormal(Expression* expr);
bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
+ bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
bool IsForInFastCase(ForInStatement* expr);
@@ -272,6 +275,8 @@ class TypeFeedbackOracle BASE_EMBEDDED {
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
+ Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
+
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index c0649d74fb..9c0ebf9e1b 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -78,7 +78,7 @@ template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
}
-unsigned Utf8::Encode(char* str, uchar c) {
+unsigned Utf8::Encode(char* str, uchar c, int previous) {
static const int kMask = ~(1 << 6);
if (c <= kMaxOneByteChar) {
str[0] = c;
@@ -88,6 +88,13 @@ unsigned Utf8::Encode(char* str, uchar c) {
str[1] = 0x80 | (c & kMask);
return 2;
} else if (c <= kMaxThreeByteChar) {
+ if (Utf16::IsTrailSurrogate(c) &&
+ Utf16::IsLeadSurrogate(previous)) {
+ const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
+ return Encode(str - kUnmatchedSize,
+ Utf16::CombineSurrogatePair(previous, c),
+ Utf16::kNoPreviousCharacter) - kUnmatchedSize;
+ }
str[0] = 0xE0 | (c >> 12);
str[1] = 0x80 | ((c >> 6) & kMask);
str[2] = 0x80 | (c & kMask);
@@ -113,12 +120,16 @@ uchar Utf8::ValueOf(const byte* bytes, unsigned length, unsigned* cursor) {
return CalculateValue(bytes, length, cursor);
}
-unsigned Utf8::Length(uchar c) {
+unsigned Utf8::Length(uchar c, int previous) {
if (c <= kMaxOneByteChar) {
return 1;
} else if (c <= kMaxTwoByteChar) {
return 2;
} else if (c <= kMaxThreeByteChar) {
+ if (Utf16::IsTrailSurrogate(c) &&
+ Utf16::IsLeadSurrogate(previous)) {
+ return kSizeOfUnmatchedSurrogate - kBytesSavedByCombiningSurrogates;
+ }
return 3;
} else {
return 4;
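
The byte accounting in Encode and Length above is incremental: an unmatched surrogate is (illegally) coded as kSizeOfUnmatchedSurrogate = 3 bytes, and when a trail surrogate arrives right after a lead, the pair is re-coded as one 4 byte sequence, so the trail's net cost is 3 - kBytesSavedByCombiningSurrogates = 1 byte. A standalone sketch of the same rule (helper names are illustrative, not V8 API):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static bool IsLead(uint16_t u)  { return (u & 0xfc00) == 0xd800; }
    static bool IsTrail(uint16_t u) { return (u & 0xfc00) == 0xdc00; }

    // UTF-8 cost of one UTF-16 code unit, given the previous unit (or -1).
    static unsigned Utf8LengthOfUnit(uint16_t c, int previous) {
      if (c <= 0x7f) return 1;
      if (c <= 0x7ff) return 2;
      if (IsTrail(c) && previous >= 0 && IsLead(static_cast<uint16_t>(previous)))
        return 1;  // 4 bytes for the whole pair minus the 3 already counted.
      return 3;
    }

    int main() {
      // 'a' followed by U+1D11E coded as the surrogate pair 0xd834 0xdd1e.
      std::vector<uint16_t> s = { 'a', 0xd834, 0xdd1e };
      unsigned total = 0;
      int previous = -1;  // no previous character
      for (uint16_t unit : s) {
        total += Utf8LengthOfUnit(unit, previous);
        previous = unit;
      }
      assert(total == 5);  // 1 byte for 'a' plus one 4 byte sequence.
      return 0;
    }
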
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 61c649f5e4..14f380642a 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -276,6 +276,7 @@ uchar Utf8::CalculateValue(const byte* str,
return kBadChar;
}
+
const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
unsigned offset = *offset_ptr;
@@ -338,6 +339,16 @@ unsigned CharacterStream::Length() {
return result;
}
+unsigned CharacterStream::Utf16Length() {
+ unsigned result = 0;
+ while (has_more()) {
+ uchar c = GetNext();
+ result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1;
+ }
+ Rewind();
+ return result;
+}
+
void CharacterStream::Seek(unsigned position) {
Rewind();
for (unsigned i = 0; i < position; i++) {
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index fb9e6339e1..94ab1b4c1e 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -100,7 +100,7 @@ class UnicodeData {
static const uchar kMaxCodePoint;
};
-// --- U t f 8 ---
+// --- U t f 8 a n d 16 ---
template <typename Data>
class Buffer {
@@ -114,10 +114,46 @@ class Buffer {
unsigned length_;
};
+
+class Utf16 {
+ public:
+ static inline bool IsLeadSurrogate(int code) {
+ if (code == kNoPreviousCharacter) return false;
+ return (code & 0xfc00) == 0xd800;
+ }
+ static inline bool IsTrailSurrogate(int code) {
+ if (code == kNoPreviousCharacter) return false;
+ return (code & 0xfc00) == 0xdc00;
+ }
+
+ static inline int CombineSurrogatePair(uchar lead, uchar trail) {
+ return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
+ }
+ static const int kNoPreviousCharacter = -1;
+ static const uchar kMaxNonSurrogateCharCode = 0xffff;
+ // Encoding a single UTF-16 code unit will produce 1, 2 or 3 bytes
+ // of UTF-8 data. The special case where the unit is a surrogate
+ // trail produces 1 byte net, because the encoding of the pair is
+ // 4 bytes and the 3 bytes that were used to encode the lead surrogate
+ // can be reclaimed.
+ static const int kMaxExtraUtf8BytesForOneUtf16CodeUnit = 3;
+ // One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
+ // The illegality stems from the surrogate not being part of a pair.
+ static const int kUtf8BytesToCodeASurrogate = 3;
+ static inline uchar LeadSurrogate(int char_code) {
+ return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
+ }
+ static inline uchar TrailSurrogate(int char_code) {
+ return 0xdc00 + (char_code & 0x3ff);
+ }
+};
+
+
class Utf8 {
public:
- static inline uchar Length(uchar chr);
- static inline unsigned Encode(char* out, uchar c);
+ static inline uchar Length(uchar chr, int previous);
+ static inline unsigned Encode(
+ char* out, uchar c, int previous);
static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
unsigned capacity, unsigned* chars_read, unsigned* offset);
static uchar CalculateValue(const byte* str,
@@ -130,6 +166,11 @@ class Utf8 {
static const unsigned kMaxThreeByteChar = 0xffff;
static const unsigned kMaxFourByteChar = 0x1fffff;
+ // A single surrogate is coded as a 3 byte UTF-8 sequence, but two together
+ // that match are coded as a 4 byte UTF-8 sequence.
+ static const unsigned kBytesSavedByCombiningSurrogates = 2;
+ static const unsigned kSizeOfUnmatchedSurrogate = 3;
+
private:
template <unsigned s> friend class Utf8InputBuffer;
friend class Test;
@@ -147,6 +188,7 @@ class CharacterStream {
// Note that default implementation is not efficient.
virtual void Seek(unsigned);
unsigned Length();
+ unsigned Utf16Length();
virtual ~CharacterStream() { }
static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
unsigned& offset);
@@ -156,6 +198,7 @@ class CharacterStream {
unsigned capacity, unsigned& offset);
static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
virtual void Rewind() = 0;
+
protected:
virtual void FillBuffer() = 0;
// The number of characters left in the current buffer
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 98b3038593..506f3f6606 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -36,6 +36,8 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "log.h"
+#include "once.h"
+#include "platform.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "store-buffer.h"
@@ -43,8 +45,7 @@
namespace v8 {
namespace internal {
-static Mutex* init_once_mutex = OS::CreateMutex();
-static bool init_once_called = false;
+V8_DECLARE_ONCE(init_once);
bool V8::is_running_ = false;
bool V8::has_been_set_up_ = false;
@@ -53,7 +54,8 @@ bool V8::has_fatal_error_ = false;
bool V8::use_crankshaft_ = true;
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
-static Mutex* entropy_mutex = OS::CreateMutex();
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+
static EntropySource entropy_source;
@@ -117,7 +119,7 @@ static void seed_random(uint32_t* state) {
state[i] = FLAG_random_seed;
} else if (entropy_source != NULL) {
uint32_t val;
- ScopedLock lock(entropy_mutex);
+ ScopedLock lock(entropy_mutex.Pointer());
entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
state[i] = val;
} else {
@@ -237,12 +239,7 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number,
return heap_number;
}
-
-void V8::InitializeOncePerProcess() {
- ScopedLock lock(init_once_mutex);
- if (init_once_called) return;
- init_once_called = true;
-
+void V8::InitializeOncePerProcessImpl() {
// Set up the platform OS support.
OS::SetUp();
@@ -266,6 +263,12 @@ void V8::InitializeOncePerProcess() {
FLAG_gc_global = true;
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
+
+ LOperand::SetUpCaches();
+}
+
+void V8::InitializeOncePerProcess() {
+ CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
} } // namespace v8::internal
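
The point of the v8.cc change above is to stop creating mutexes during static initialization: OS::CreateMutex() ran before main with unspecified ordering relative to other static initializers, whereas a once flag and a LazyMutex are constant-initialized PODs. A sketch of the same call-once pattern, shown with std::call_once purely for illustration rather than V8's CallOnce:

    #include <cassert>
    #include <mutex>

    static std::once_flag init_once;  // constant-initialized, no constructor runs
    static bool initialized = false;

    static void InitializeOncePerProcessImpl() { initialized = true; }

    static void InitializeOncePerProcess() {
      std::call_once(init_once, &InitializeOncePerProcessImpl);
    }

    int main() {
      InitializeOncePerProcess();
      InitializeOncePerProcess();  // Second call is a no-op.
      assert(initialized);
      return 0;
    }
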
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 699c5a09b9..59ce602555 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -116,6 +116,7 @@ class V8 : public AllStatic {
static void FireCallCompletedCallback(Isolate* isolate);
private:
+ static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
// True if engine is currently running
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 57e6594340..78c78742dd 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
-#define BUILD_NUMBER 17
-#define PATCH_LEVEL 0
+#define BUILD_NUMBER 24
+#define PATCH_LEVEL 6
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 8e3caa444a..a9cc2ef287 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -235,9 +235,9 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
- return Assembler::kCallTargetSize;
+ return Assembler::kSpecialTargetSize;
} else {
- return Assembler::kExternalTargetSize;
+ return kPointerSize;
}
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 5397cd5eaa..2f0c542bc2 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -3045,8 +3045,6 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 7af33e126f..60b29e6475 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -161,23 +161,41 @@ struct Register {
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
-const Register rax = { 0 };
-const Register rcx = { 1 };
-const Register rdx = { 2 };
-const Register rbx = { 3 };
-const Register rsp = { 4 };
-const Register rbp = { 5 };
-const Register rsi = { 6 };
-const Register rdi = { 7 };
-const Register r8 = { 8 };
-const Register r9 = { 9 };
-const Register r10 = { 10 };
-const Register r11 = { 11 };
-const Register r12 = { 12 };
-const Register r13 = { 13 };
-const Register r14 = { 14 };
-const Register r15 = { 15 };
-const Register no_reg = { -1 };
+const int kRegister_rax_Code = 0;
+const int kRegister_rcx_Code = 1;
+const int kRegister_rdx_Code = 2;
+const int kRegister_rbx_Code = 3;
+const int kRegister_rsp_Code = 4;
+const int kRegister_rbp_Code = 5;
+const int kRegister_rsi_Code = 6;
+const int kRegister_rdi_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_r11_Code = 11;
+const int kRegister_r12_Code = 12;
+const int kRegister_r13_Code = 13;
+const int kRegister_r14_Code = 14;
+const int kRegister_r15_Code = 15;
+const int kRegister_no_reg_Code = -1;
+
+const Register rax = { kRegister_rax_Code };
+const Register rcx = { kRegister_rcx_Code };
+const Register rdx = { kRegister_rdx_Code };
+const Register rbx = { kRegister_rbx_Code };
+const Register rsp = { kRegister_rsp_Code };
+const Register rbp = { kRegister_rbp_Code };
+const Register rsi = { kRegister_rsi_Code };
+const Register rdi = { kRegister_rdi_Code };
+const Register r8 = { kRegister_r8_Code };
+const Register r9 = { kRegister_r9_Code };
+const Register r10 = { kRegister_r10_Code };
+const Register r11 = { kRegister_r11_Code };
+const Register r12 = { kRegister_r12_Code };
+const Register r13 = { kRegister_r13_Code };
+const Register r14 = { kRegister_r14_Code };
+const Register r15 = { kRegister_r15_Code };
+const Register no_reg = { kRegister_no_reg_Code };
struct XMMRegister {
@@ -559,8 +577,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@@ -573,8 +591,7 @@ class Assembler : public AssemblerBase {
inline Handle<Object> code_target_object_handle_at(Address pc);
// Number of bytes taken up by the branch target in the code.
- static const int kCallTargetSize = 4; // Use 32-bit displacement.
- static const int kExternalTargetSize = 8; // Use 64-bit absolute.
+ static const int kSpecialTargetSize = 4; // Use 32-bit displacement.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index d616749e02..2845039771 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -5991,42 +5991,45 @@ struct AheadOfTimeWriteBarrierStubList {
};
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { rbx, rax, rdi, EMIT_REMEMBERED_SET },
+ { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
- { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+ { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
- { rbx, rcx, rdx, OMIT_REMEMBERED_SET },
+ { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { rdx, rcx, rbx, EMIT_REMEMBERED_SET },
+ { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
// GenerateStoreField calls the stub with two different permutations of
// registers. This is the second.
- { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+ { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
// StoreIC::GenerateNormal via GenerateDictionaryStore.
- { rbx, r8, r9, EMIT_REMEMBERED_SET },
+ { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
// KeyedStoreIC::GenerateGeneric.
- { rbx, rdx, rcx, EMIT_REMEMBERED_SET},
+ { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { rdi, rbx, rcx, EMIT_REMEMBERED_SET},
- { rdx, rdi, rbx, EMIT_REMEMBERED_SET},
+ { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
+ { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { rdx, rbx, rdi, EMIT_REMEMBERED_SET},
- { rdx, rbx, rdi, OMIT_REMEMBERED_SET},
+ { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
+ { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { rdx, r11, r15, EMIT_REMEMBERED_SET},
+ { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
- { r11, rax, r15, EMIT_REMEMBERED_SET},
+ { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
- { rbx, rax, rcx, EMIT_REMEMBERED_SET},
+ { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
bool RecordWriteStub::IsPregenerated() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
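
The REG macro above (and its twin in macro-assembler-x64.cc further down) exists so the table is built from the integer kRegister_..._Code constants rather than the const Register objects: each entry is then a constant expression, and the whole array becomes statically initialized POD with no dependence on static construction order. A small sketch of the token-pasting trick:

    #include <cassert>

    struct Register { int code_; };

    const int kRegister_rax_Code = 0;
    const int kRegister_rcx_Code = 1;

    #define REG(Name) { kRegister_ ## Name ## _Code }

    // Expands to { { 0 }, { 1 } }: compile-time initialized, no constructors.
    static const Register kTable[] = { REG(rax), REG(rcx) };

    #undef REG

    int main() {
      assert(kTable[0].code_ == 0 && kTable[1].code_ == 1);
      return 0;
    }
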
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 902f7e93a3..a8d39b25f6 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -55,8 +55,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-TranscendentalFunction CreateTranscendentalFunction(
- TranscendentalCache::Type type) {
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
@@ -96,7 +95,31 @@ TranscendentalFunction CreateTranscendentalFunction(
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<TranscendentalFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) return &sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // xmm0: raw double input.
+ // Move double input into registers.
+ __ sqrtsd(xmm0, xmm0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(desc.reloc_size == 0);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 5cbdad7ac3..adeda0bb08 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -34,6 +34,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "disasm.h"
+#include "lazy-instance.h"
namespace disasm {
@@ -269,7 +270,8 @@ void InstructionTable::AddJumpConditionalShort() {
}
-static InstructionTable instruction_table;
+static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
+ LAZY_INSTANCE_INITIALIZER;
static InstructionDesc cmov_instructions[16] = {
@@ -1338,7 +1340,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
}
- const InstructionDesc& idesc = instruction_table.Get(current);
+ const InstructionDesc& idesc = instruction_table.Get().Get(current);
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
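
Wrapping the InstructionTable in LazyInstance moves its constructor from static initialization time to the first Get(), avoiding both initializer-order hazards and startup cost in processes that never disassemble. A sketch of the pattern, using a function-local static for illustration (V8's LazyInstance exists precisely because pre-C++11 compilers did not make this construction thread-safe):

    #include <cassert>

    struct InstructionTable {
      int entries;
      InstructionTable() : entries(256) {}  // imagine expensive table setup here
    };

    static InstructionTable& instruction_table() {
      static InstructionTable table;  // constructed on first call only
      return table;
    }

    int main() {
      assert(instruction_table().entries == 256);
      return 0;
    }
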
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 6739cc84a2..85c5e758c0 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -1377,6 +1377,15 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ PushRoot(Heap::kNullValueRootIndex);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1411,6 +1420,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1455,23 +1465,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
- case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ PushRoot(Heap::kNullValueRootIndex);
- } else {
- __ PushRoot(Heap::kNullValueRootIndex);
- VisitForStackValue(value);
- }
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(rsp, 0));
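
The AccessorTable loop above replaces one kDefineOrRedefineAccessorProperty call per accessor with one per property key: getters and setters are first collected by key, then each matched pair is defined in a single runtime call. A standalone sketch of the grouping (std::map stands in for the zone-allocated AccessorTable):

    #include <cassert>
    #include <map>
    #include <string>

    struct AccessorPair {
      const char* getter = nullptr;  // null plays the role of Heap::kNullValue
      const char* setter = nullptr;
    };

    int main() {
      // Object literal: { get x() {...}, set x(v) {...}, get y() {...} }
      std::map<std::string, AccessorPair> table;
      table["x"].getter = "get x";
      table["x"].setter = "set x";
      table["y"].getter = "get y";

      // One runtime call per key (2), not one per accessor (3).
      int runtime_calls = 0;
      for (const auto& entry : table) {
        if (entry.second.getter || entry.second.setter) runtime_calls++;
      }
      assert(runtime_calls == 2);
      return 0;
    }
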
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index f707df030f..2ba2c57f40 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -540,7 +540,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -2544,14 +2543,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(rax)); // Used for parameter count.
- ASSERT(function.is(rdi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(rax));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2594,6 +2588,17 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ movq(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(rax)); // Used for parameter count.
+ ASSERT(function.is(rdi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(rax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -4224,34 +4229,46 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
- // Copy elements backing store header.
- ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
+ // Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ movq(rcx, FieldOperand(source, i));
__ movq(FieldOperand(result, elements_offset + i), rcx);
}
- }
- // Copy elements backing store content.
- ASSERT(!has_elements || elements->IsFixedArray());
- int elements_length = has_elements ? elements->length() : 0;
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
+ // Copy elements backing store content.
+ int elements_length = elements->length();
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(rcx, Operand(result, *offset));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ } else {
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ }
} else {
- __ movq(rcx, value, RelocInfo::NONE);
- __ movq(FieldOperand(result, total_offset), rcx);
+ UNREACHABLE();
}
}
}
@@ -4701,7 +4718,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ movq(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(NegateCondition(cc), instr->environment());
+ DeoptimizeIf(cc, instr->environment());
}
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index e2569c9f4d..d3e4cddf74 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1092,6 +1092,14 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
LOperand* receiver = UseFixed(instr->receiver(), rax);
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 3d5d8548cb..2d8fd2ecce 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -178,7 +178,8 @@ class LCodeGen;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
- V(DateField)
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -469,6 +470,20 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 2118886e95..f7db250f9e 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -799,8 +799,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
}
-static const Register saved_regs[] =
- { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const Register saved_regs[] = {
+ REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
+ REG(r9), REG(r10), REG(r11)
+};
+
+#undef REG
+
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
@@ -2418,7 +2425,8 @@ void MacroAssembler::Dropad() {
// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+const int
+MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
0,
1,
2,
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 86eb312831..6bb5cfeb42 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1307,7 +1307,7 @@ class MacroAssembler: public Assembler {
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
- static int kSafepointPushRegisterIndices[Register::kNumRegisters];
+ static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 11;
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 773fc4c16c..837c2543c3 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -564,7 +564,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(rax, Operand(current_character(), -minus));
__ and_(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 18cb3c062a..f07f6b6b72 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1224,14 +1224,9 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(rdx, miss);
- }
// Check that the maps haven't changed.
+ __ JumpIfSmi(rdx, miss);
CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
}
@@ -2665,14 +2660,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual loads. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(rax, &miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(rax, &miss);
CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
// Get the value from the cell.
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index bc092b5f5e..864846553a 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -240,7 +240,7 @@ class ZoneSplayTree: public SplayTree<Config, ZoneListAllocationPolicy> {
};
-typedef TemplateHashMap<ZoneListAllocationPolicy> ZoneHashMap;
+typedef TemplateHashMapImpl<ZoneListAllocationPolicy> ZoneHashMap;
} } // namespace v8::internal
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 2de0afba17..af28be19d8 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -84,10 +84,6 @@ test-debug/DebugBreakLoop: SKIP
##############################################################################
-[ $arch == mips ]
-test-serialize: SKIP
-
-##############################################################################
[ $arch == mips && $crankshaft ]
# Tests that time out with crankshaft.
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 5137c65637..b1a23c1ef7 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -5526,6 +5526,17 @@ static int StrNCmp16(uint16_t* a, uint16_t* b, int n) {
}
+int GetUtf8Length(Handle<String> str) {
+ int len = str->Utf8Length();
+ if (len < 0) {
+ i::Handle<i::String> istr(v8::Utils::OpenHandle(*str));
+ i::FlattenString(istr);
+ len = str->Utf8Length();
+ }
+ return len;
+}
+
+
THREADED_TEST(StringWrite) {
LocalContext context;
v8::HandleScope scope;
@@ -5606,7 +5617,7 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(0, strncmp(utf8buf, "ab\1", 3));
memset(utf8buf, 0x1, sizeof(utf8buf));
- len = left_tree->Utf8Length();
+ len = GetUtf8Length(left_tree);
int utf8_expected =
(0x80 + (0x800 - 0x80) * 2 + (0xd800 - 0x800) * 3) / kStride;
CHECK_EQ(utf8_expected, len);
@@ -5620,7 +5631,7 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(1, utf8buf[utf8_expected]);
memset(utf8buf, 0x1, sizeof(utf8buf));
- len = right_tree->Utf8Length();
+ len = GetUtf8Length(right_tree);
CHECK_EQ(utf8_expected, len);
len = right_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
CHECK_EQ(utf8_expected, len);
@@ -5745,6 +5756,225 @@ THREADED_TEST(StringWrite) {
}
+static void Utf16Helper(
+ LocalContext& context,
+ const char* name,
+ const char* lengths_name,
+ int len) {
+ Local<v8::Array> a =
+ Local<v8::Array>::Cast(context->Global()->Get(v8_str(name)));
+ Local<v8::Array> alens =
+ Local<v8::Array>::Cast(context->Global()->Get(v8_str(lengths_name)));
+ for (int i = 0; i < len; i++) {
+ Local<v8::String> string =
+ Local<v8::String>::Cast(a->Get(i));
+ Local<v8::Number> expected_len =
+ Local<v8::Number>::Cast(alens->Get(i));
+ CHECK_EQ(expected_len->Value() != string->Length(),
+ string->MayContainNonAscii());
+ int length = GetUtf8Length(string);
+ CHECK_EQ(static_cast<int>(expected_len->Value()), length);
+ }
+}
+
+
+static uint16_t StringGet(Handle<String> str, int index) {
+ i::Handle<i::String> istring =
+ v8::Utils::OpenHandle(String::Cast(*str));
+ return istring->Get(index);
+}
+
+
+static void WriteUtf8Helper(
+ LocalContext& context,
+ const char* name,
+ const char* lengths_name,
+ int len) {
+ Local<v8::Array> b =
+ Local<v8::Array>::Cast(context->Global()->Get(v8_str(name)));
+ Local<v8::Array> alens =
+ Local<v8::Array>::Cast(context->Global()->Get(v8_str(lengths_name)));
+ char buffer[1000];
+ char buffer2[1000];
+ for (int i = 0; i < len; i++) {
+ Local<v8::String> string =
+ Local<v8::String>::Cast(b->Get(i));
+ Local<v8::Number> expected_len =
+ Local<v8::Number>::Cast(alens->Get(i));
+ int utf8_length = static_cast<int>(expected_len->Value());
+ for (int j = utf8_length + 1; j >= 0; j--) {
+ memset(reinterpret_cast<void*>(&buffer), 42, sizeof(buffer));
+ memset(reinterpret_cast<void*>(&buffer2), 42, sizeof(buffer2));
+ int nchars;
+ int utf8_written =
+ string->WriteUtf8(buffer, j, &nchars, String::NO_OPTIONS);
+ int utf8_written2 =
+ string->WriteUtf8(buffer2, j, &nchars, String::NO_NULL_TERMINATION);
+ CHECK_GE(utf8_length + 1, utf8_written);
+ CHECK_GE(utf8_length, utf8_written2);
+ for (int k = 0; k < utf8_written2; k++) {
+ CHECK_EQ(buffer[k], buffer2[k]);
+ }
+ CHECK(nchars * 3 >= utf8_written - 1);
+ CHECK(nchars <= utf8_written);
+ if (j == utf8_length + 1) {
+ CHECK_EQ(utf8_written2, utf8_length);
+ CHECK_EQ(utf8_written2 + 1, utf8_written);
+ }
+ CHECK_EQ(buffer[utf8_written], 42);
+ if (j > utf8_length) {
+ if (utf8_written != 0) CHECK_EQ(buffer[utf8_written - 1], 0);
+ if (utf8_written > 1) CHECK_NE(buffer[utf8_written - 2], 42);
+ Handle<String> roundtrip = v8_str(buffer);
+ CHECK(roundtrip->Equals(string));
+ } else {
+ if (utf8_written != 0) CHECK_NE(buffer[utf8_written - 1], 42);
+ }
+ if (utf8_written2 != 0) CHECK_NE(buffer[utf8_written - 1], 42);
+ if (nchars >= 2) {
+ uint16_t trail = StringGet(string, nchars - 1);
+ uint16_t lead = StringGet(string, nchars - 2);
+ if (((lead & 0xfc00) == 0xd800) &&
+ ((trail & 0xfc00) == 0xdc00)) {
+ unsigned char u1 = buffer2[utf8_written2 - 4];
+ unsigned char u2 = buffer2[utf8_written2 - 3];
+ unsigned char u3 = buffer2[utf8_written2 - 2];
+ unsigned char u4 = buffer2[utf8_written2 - 1];
+ CHECK_EQ((u1 & 0xf8), 0xf0);
+ CHECK_EQ((u2 & 0xc0), 0x80);
+ CHECK_EQ((u3 & 0xc0), 0x80);
+ CHECK_EQ((u4 & 0xc0), 0x80);
+ uint32_t c = 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
+ CHECK_EQ((u4 & 0x3f), (c & 0x3f));
+ CHECK_EQ((u3 & 0x3f), ((c >> 6) & 0x3f));
+ CHECK_EQ((u2 & 0x3f), ((c >> 12) & 0x3f));
+ CHECK_EQ((u1 & 0x3), c >> 18);
+ }
+ }
+ }
+ }
+}
+
+
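The bit masks checked at the end of WriteUtf8Helper are the standard mapping from a surrogate pair to a 4-byte UTF-8 sequence. The same arithmetic in JavaScript, with the constants taken from the checks above:

    // Decode a surrogate pair to a code point, then emit its UTF-8 bytes.
    var lead = 0xd801, trail = 0xdc05;
    var c = 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);  // 0x10405
    var bytes = [0xf0 | (c >> 18),           // leading byte, 11110uuu
                 0x80 | ((c >> 12) & 0x3f),  // continuation, 10uuuuuu
                 0x80 | ((c >> 6) & 0x3f),   // continuation, 10xxxxxx
                 0x80 | (c & 0x3f)];         // continuation, 10xxxxxx
    // bytes is [0xf0, 0x90, 0x90, 0x85], i.e. "\360\220\220\205".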
+THREADED_TEST(Utf16) {
+ LocalContext context;
+ v8::HandleScope scope;
+ CompileRun(
+ "var pad = '01234567890123456789';"
+ "var p = [];"
+ "var plens = [20, 3, 3];"
+ "p.push('01234567890123456789');"
+ "var lead = 0xd800;"
+ "var trail = 0xdc00;"
+ "p.push(String.fromCharCode(0xd800));"
+ "p.push(String.fromCharCode(0xdc00));"
+ "var a = [];"
+ "var b = [];"
+ "var c = [];"
+ "var alens = [];"
+ "for (var i = 0; i < 3; i++) {"
+ " p[1] = String.fromCharCode(lead++);"
+ " for (var j = 0; j < 3; j++) {"
+ " p[2] = String.fromCharCode(trail++);"
+ " a.push(p[i] + p[j]);"
+ " b.push(p[i] + p[j]);"
+ " c.push(p[i] + p[j]);"
+ " alens.push(plens[i] + plens[j]);"
+ " }"
+ "}"
+ "alens[5] -= 2;" // Here the surrogate pairs match up.
+ "var a2 = [];"
+ "var b2 = [];"
+ "var c2 = [];"
+ "var a2lens = [];"
+ "for (var m = 0; m < 9; m++) {"
+ " for (var n = 0; n < 9; n++) {"
+ " a2.push(a[m] + a[n]);"
+ " b2.push(b[m] + b[n]);"
+ " var newc = 'x' + c[m] + c[n] + 'y';"
+ " c2.push(newc.substring(1, newc.length - 1));"
+ " var utf = alens[m] + alens[n];" // And here.
+ // The 'n's that start with 0xdc.. are 6-8
+ // The 'm's that end with 0xd8.. are 1, 4 and 7
+ " if ((m % 3) == 1 && n >= 6) utf -= 2;"
+ " a2lens.push(utf);"
+ " }"
+ "}");
+ Utf16Helper(context, "a", "alens", 9);
+ Utf16Helper(context, "a2", "a2lens", 81);
+ WriteUtf8Helper(context, "b", "alens", 9);
+ WriteUtf8Helper(context, "b2", "a2lens", 81);
+ WriteUtf8Helper(context, "c2", "a2lens", 81);
+}
+
+
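The length bookkeeping in the script above (plens = [20, 3, 3] and the -= 2 adjustments) follows from the conventions in the utf8Length sketch earlier:

    utf8Length('01234567890123456789');               // 20 (ASCII pad)
    utf8Length(String.fromCharCode(0xd800));          // 3 (lone lead)
    utf8Length(String.fromCharCode(0xdc00));          // 3 (lone trail)
    utf8Length(String.fromCharCode(0xd800, 0xdc00));  // 4 (matched pair),
    // 2 less than 3 + 3, hence "alens[5] -= 2" and the m/n adjustment.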
+static bool SameSymbol(Handle<String> s1, Handle<String> s2) {
+ i::Handle<i::String> is1(v8::Utils::OpenHandle(*s1));
+ i::Handle<i::String> is2(v8::Utils::OpenHandle(*s2));
+ return *is1 == *is2;
+}
+
+
+static void SameSymbolHelper(const char* a, const char* b) {
+ Handle<String> symbol1 = v8::String::NewSymbol(a);
+ Handle<String> symbol2 = v8::String::NewSymbol(b);
+ CHECK(SameSymbol(symbol1, symbol2));
+}
+
+
+THREADED_TEST(Utf16Symbol) {
+ LocalContext context;
+ v8::HandleScope scope;
+
+ Handle<String> symbol1 = v8::String::NewSymbol("abc");
+ Handle<String> symbol2 = v8::String::NewSymbol("abc");
+ CHECK(SameSymbol(symbol1, symbol2));
+
+ SameSymbolHelper("\360\220\220\205", // 4 byte encoding.
+ "\355\240\201\355\260\205"); // 2 3-byte surrogates.
+ SameSymbolHelper("\355\240\201\355\260\206", // 2 3-byte surrogates.
+ "\360\220\220\206"); // 4 byte encoding.
+ SameSymbolHelper("x\360\220\220\205", // 4 byte encoding.
+ "x\355\240\201\355\260\205"); // 2 3-byte surrogates.
+ SameSymbolHelper("x\355\240\201\355\260\206", // 2 3-byte surrogates.
+ "x\360\220\220\206"); // 4 byte encoding.
+ CompileRun(
+ "var sym0 = 'benedictus';"
+ "var sym0b = 'S\303\270ren';"
+ "var sym1 = '\355\240\201\355\260\207';"
+ "var sym2 = '\360\220\220\210';"
+ "var sym3 = 'x\355\240\201\355\260\207';"
+ "var sym4 = 'x\360\220\220\210';"
+ "if (sym1.length != 2) throw sym1;"
+ "if (sym1.charCodeAt(1) != 0xdc07) throw sym1.charCodeAt(1);"
+ "if (sym2.length != 2) throw sym2;"
+      "if (sym2.charCodeAt(1) != 0xdc08) throw sym2.charCodeAt(1);"
+      "if (sym3.length != 3) throw sym3;"
+      "if (sym3.charCodeAt(2) != 0xdc07) throw sym3.charCodeAt(2);"
+      "if (sym4.length != 3) throw sym4;"
+      "if (sym4.charCodeAt(2) != 0xdc08) throw sym4.charCodeAt(2);");
+ Handle<String> sym0 = v8::String::NewSymbol("benedictus");
+ Handle<String> sym0b = v8::String::NewSymbol("S\303\270ren");
+ Handle<String> sym1 = v8::String::NewSymbol("\355\240\201\355\260\207");
+ Handle<String> sym2 = v8::String::NewSymbol("\360\220\220\210");
+ Handle<String> sym3 = v8::String::NewSymbol("x\355\240\201\355\260\207");
+ Handle<String> sym4 = v8::String::NewSymbol("x\360\220\220\210");
+ v8::Local<v8::Object> global = context->Global();
+ Local<Value> s0 = global->Get(v8_str("sym0"));
+ Local<Value> s0b = global->Get(v8_str("sym0b"));
+ Local<Value> s1 = global->Get(v8_str("sym1"));
+ Local<Value> s2 = global->Get(v8_str("sym2"));
+ Local<Value> s3 = global->Get(v8_str("sym3"));
+ Local<Value> s4 = global->Get(v8_str("sym4"));
+ CHECK(SameSymbol(sym0, Handle<String>(String::Cast(*s0))));
+ CHECK(SameSymbol(sym0b, Handle<String>(String::Cast(*s0b))));
+ CHECK(SameSymbol(sym1, Handle<String>(String::Cast(*s1))));
+ CHECK(SameSymbol(sym2, Handle<String>(String::Cast(*s2))));
+ CHECK(SameSymbol(sym3, Handle<String>(String::Cast(*s3))));
+ CHECK(SameSymbol(sym4, Handle<String>(String::Cast(*s4))));
+}
+
+
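The octal escapes in Utf16Symbol are two spellings of one character: "\360\220\220\205" is the 4-byte UTF-8 form of U+10405, and "\355\240\201\355\260\205" is the pair of separately encoded surrogates (0xd801, 0xdc05) that decode to the same character, so both must intern to the same symbol. At the JavaScript level both produce the identical two-unit string:

    var s = String.fromCharCode(0xd801, 0xdc05);
    s.length;          // 2
    s.charCodeAt(0);   // 0xd801
    s.charCodeAt(1);   // 0xdc05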
THREADED_TEST(ToArrayIndex) {
v8::HandleScope scope;
LocalContext context;
@@ -11705,6 +11935,9 @@ THREADED_TEST(MorphCompositeStringTest) {
"var slice = lhs.substring(1, lhs.length - 1);"
"var slice_on_cons = (lhs + rhs).substring(1, lhs.length *2 - 1);");
+ CHECK(!lhs->MayContainNonAscii());
+ CHECK(!rhs->MayContainNonAscii());
+
MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource);
MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource);
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index a6c76f03ed..6985433d96 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -219,21 +219,21 @@ TEST(MIPS2) {
// Bit twiddling instructions & conditional moves.
// Uses t0-t7 as set above.
- __ clz(v0, t0); // 29
- __ clz(v1, t1); // 19
+ __ Clz(v0, t0); // 29
+ __ Clz(v1, t1); // 19
__ addu(v0, v0, v1); // 48
- __ clz(v1, t2); // 3
+ __ Clz(v1, t2); // 3
__ addu(v0, v0, v1); // 51
- __ clz(v1, t7); // 0
+ __ Clz(v1, t7); // 0
__ addu(v0, v0, v1); // 51
__ Branch(&error, ne, v0, Operand(51));
- __ movn(a0, t3, t0); // Move a0<-t3 (t0 is NOT 0).
+ __ Movn(a0, t3, t0); // Move a0<-t3 (t0 is NOT 0).
__ Ins(a0, t1, 12, 8); // 0x7ff34fff
__ Branch(&error, ne, a0, Operand(0x7ff34fff));
- __ movz(a0, t6, t7); // a0 not updated (t7 is NOT 0).
+ __ Movz(a0, t6, t7); // a0 not updated (t7 is NOT 0).
__ Ext(a1, a0, 8, 12); // 0x34f
__ Branch(&error, ne, a1, Operand(0x34f));
- __ movz(a0, t6, v1); // a0<-t6, v0 is 0, from 8 instr back.
+ __ Movz(a0, t6, v1); // a0<-t6, v0 is 0, from 8 instr back.
__ Branch(&error, ne, a0, Operand(t6));
// Everything was correctly executed. Load the expected result.
@@ -579,8 +579,13 @@ TEST(MIPS7) {
__ bind(&neither_is_nan);
- __ c(OLT, D, f6, f4, 2);
- __ bc1t(&less_than, 2);
+ if (kArchVariant == kLoongson) {
+ __ c(OLT, D, f6, f4);
+ __ bc1t(&less_than);
+ } else {
+ __ c(OLT, D, f6, f4, 2);
+ __ bc1t(&less_than, 2);
+ }
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
@@ -774,7 +779,7 @@ TEST(MIPS10) {
Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU) && mips32r2) {
+ if (CpuFeatures::IsSupported(FPU) && kArchVariant == kMips32r2) {
CpuFeatures::Scope scope(FPU);
// Load all structure elements to registers.
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 8eadc6483b..1f8742452d 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -148,12 +148,14 @@ TEST(Type0) {
COMPARE(divu(v0, v1),
"0043001b divu v0, v1");
- COMPARE(mul(a0, a1, a2),
- "70a62002 mul a0, a1, a2");
- COMPARE(mul(t2, t3, t4),
- "716c5002 mul t2, t3, t4");
- COMPARE(mul(v0, v1, s0),
- "70701002 mul v0, v1, s0");
+ if (kArchVariant != kLoongson) {
+ COMPARE(mul(a0, a1, a2),
+ "70a62002 mul a0, a1, a2");
+ COMPARE(mul(t2, t3, t4),
+ "716c5002 mul t2, t3, t4");
+ COMPARE(mul(v0, v1, s0),
+ "70701002 mul v0, v1, s0");
+ }
COMPARE(addiu(a0, a1, 0x0),
"24a40000 addiu a0, a1, 0");
@@ -274,7 +276,7 @@ TEST(Type0) {
COMPARE(srav(v0, v1, fp),
"03c31007 srav v0, v1, fp");
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
COMPARE(rotr(a0, a1, 0),
"00252002 rotr a0, a1, 0");
COMPARE(rotr(s0, s1, 8),
@@ -377,48 +379,50 @@ TEST(Type0) {
COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1");
- COMPARE(movz(a0, a1, a2),
- "00a6200a movz a0, a1, a2");
- COMPARE(movz(s0, s1, s2),
- "0232800a movz s0, s1, s2");
- COMPARE(movz(t2, t3, t4),
- "016c500a movz t2, t3, t4");
- COMPARE(movz(v0, v1, a2),
- "0066100a movz v0, v1, a2");
- COMPARE(movn(a0, a1, a2),
- "00a6200b movn a0, a1, a2");
- COMPARE(movn(s0, s1, s2),
- "0232800b movn s0, s1, s2");
- COMPARE(movn(t2, t3, t4),
- "016c500b movn t2, t3, t4");
- COMPARE(movn(v0, v1, a2),
- "0066100b movn v0, v1, a2");
-
- COMPARE(movt(a0, a1, 1),
- "00a52001 movt a0, a1, 1");
- COMPARE(movt(s0, s1, 2),
- "02298001 movt s0, s1, 2");
- COMPARE(movt(t2, t3, 3),
- "016d5001 movt t2, t3, 3");
- COMPARE(movt(v0, v1, 7),
- "007d1001 movt v0, v1, 7");
- COMPARE(movf(a0, a1, 0),
- "00a02001 movf a0, a1, 0");
- COMPARE(movf(s0, s1, 4),
- "02308001 movf s0, s1, 4");
- COMPARE(movf(t2, t3, 5),
- "01745001 movf t2, t3, 5");
- COMPARE(movf(v0, v1, 6),
- "00781001 movf v0, v1, 6");
-
- COMPARE(clz(a0, a1),
- "70a42020 clz a0, a1");
- COMPARE(clz(s6, s7),
- "72f6b020 clz s6, s7");
- COMPARE(clz(v0, v1),
- "70621020 clz v0, v1");
-
- if (mips32r2) {
+ if (kArchVariant != kLoongson) {
+ COMPARE(movz(a0, a1, a2),
+ "00a6200a movz a0, a1, a2");
+ COMPARE(movz(s0, s1, s2),
+ "0232800a movz s0, s1, s2");
+ COMPARE(movz(t2, t3, t4),
+ "016c500a movz t2, t3, t4");
+ COMPARE(movz(v0, v1, a2),
+ "0066100a movz v0, v1, a2");
+ COMPARE(movn(a0, a1, a2),
+ "00a6200b movn a0, a1, a2");
+ COMPARE(movn(s0, s1, s2),
+ "0232800b movn s0, s1, s2");
+ COMPARE(movn(t2, t3, t4),
+ "016c500b movn t2, t3, t4");
+ COMPARE(movn(v0, v1, a2),
+ "0066100b movn v0, v1, a2");
+
+ COMPARE(movt(a0, a1, 1),
+ "00a52001 movt a0, a1, 1");
+ COMPARE(movt(s0, s1, 2),
+ "02298001 movt s0, s1, 2");
+ COMPARE(movt(t2, t3, 3),
+ "016d5001 movt t2, t3, 3");
+ COMPARE(movt(v0, v1, 7),
+ "007d1001 movt v0, v1, 7");
+ COMPARE(movf(a0, a1, 0),
+ "00a02001 movf a0, a1, 0");
+ COMPARE(movf(s0, s1, 4),
+ "02308001 movf s0, s1, 4");
+ COMPARE(movf(t2, t3, 5),
+ "01745001 movf t2, t3, 5");
+ COMPARE(movf(v0, v1, 6),
+ "00781001 movf v0, v1, 6");
+
+ COMPARE(clz(a0, a1),
+ "70a42020 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "72f6b020 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "70621020 clz v0, v1");
+ }
+
+ if (kArchVariant == kMips32r2) {
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 7a227cdf9b..a56f250c2a 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -352,6 +352,59 @@ TEST(HeapSnapshotInternalReferences) {
#define CHECK_NE_UINT64_T(a, b) \
CHECK((a) != (b)) // NOLINT
+TEST(HeapEntryIdsAndArrayShift) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun(
+ "function AnObject() {\n"
+ " this.first = 'first';\n"
+ " this.second = 'second';\n"
+ "}\n"
+ "var a = new Array();\n"
+ "for (var i = 0; i < 10; ++i)\n"
+ " a.push(new AnObject());\n");
+ const v8::HeapSnapshot* snapshot1 =
+ v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
+
+ CompileRun(
+ "for (var i = 0; i < 1; ++i)\n"
+ " a.shift();\n");
+
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ const v8::HeapSnapshot* snapshot2 =
+ v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
+
+ const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
+ const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
+ CHECK_NE_UINT64_T(0, global1->GetId());
+ CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
+
+ const v8::HeapGraphNode* a1 =
+ GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a1);
+ const v8::HeapGraphNode* e1 =
+ GetProperty(a1, v8::HeapGraphEdge::kHidden, "1");
+ CHECK_NE(NULL, e1);
+ const v8::HeapGraphNode* k1 =
+ GetProperty(e1, v8::HeapGraphEdge::kInternal, "elements");
+ CHECK_NE(NULL, k1);
+ const v8::HeapGraphNode* a2 =
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a2);
+ const v8::HeapGraphNode* e2 =
+ GetProperty(a2, v8::HeapGraphEdge::kHidden, "1");
+ CHECK_NE(NULL, e2);
+ const v8::HeapGraphNode* k2 =
+ GetProperty(e2, v8::HeapGraphEdge::kInternal, "elements");
+ CHECK_NE(NULL, k2);
+
+ CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
+ CHECK_EQ_UINT64_T(e1->GetId(), e2->GetId());
+ CHECK_EQ_UINT64_T(k1->GetId(), k2->GetId());
+}
+
TEST(HeapEntryIdsAndGC) {
v8::HandleScope scope;
LocalContext env;
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index f536e6b193..6847ef7eea 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -307,6 +307,7 @@ TEST(CFromJSStackTrace) {
// Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
int base = 0;
CHECK_GT(sample.frames_count, base + 1);
+
CHECK(IsAddressWithinFuncCode("JSFuncDoTrace", sample.stack[base + 0]));
CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 1]));
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index cd8a6aff38..6bcae7c308 100755
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -63,7 +63,7 @@ TEST(ScanKeywords) {
int length = i::StrLength(key_token.keyword);
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
- i::Utf8ToUC16CharacterStream stream(keyword, length);
+ i::Utf8ToUtf16CharacterStream stream(keyword, length);
i::Scanner scanner(&unicode_cache);
// The scanner should parse Harmony keywords for this test.
scanner.SetHarmonyScoping(true);
@@ -74,7 +74,7 @@ TEST(ScanKeywords) {
}
// Removing characters will make keyword matching fail.
{
- i::Utf8ToUC16CharacterStream stream(keyword, length - 1);
+ i::Utf8ToUtf16CharacterStream stream(keyword, length - 1);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
@@ -85,7 +85,7 @@ TEST(ScanKeywords) {
for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
memmove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
- i::Utf8ToUC16CharacterStream stream(buffer, length + 1);
+ i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
@@ -95,7 +95,7 @@ TEST(ScanKeywords) {
{
memmove(buffer, keyword, length);
buffer[length - 1] = '_';
- i::Utf8ToUC16CharacterStream stream(buffer, length);
+ i::Utf8ToUtf16CharacterStream stream(buffer, length);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
@@ -255,7 +255,7 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
- i::Utf8ToUC16CharacterStream stream(
+ i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(program),
static_cast<unsigned>(strlen(program)));
i::CompleteParserRecorder log;
@@ -291,7 +291,7 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
- i::Utf8ToUC16CharacterStream stream(
+ i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(program),
static_cast<unsigned>(strlen(program)));
i::CompleteParserRecorder log;
@@ -326,8 +326,9 @@ TEST(RegressChromium62639) {
// and then used the invalid currently scanned literal. This always
// failed in debug mode, and sometimes crashed in release mode.
- i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
- static_cast<unsigned>(strlen(program)));
+ i::Utf8ToUtf16CharacterStream stream(
+ reinterpret_cast<const i::byte*>(program),
+ static_cast<unsigned>(strlen(program)));
i::ScriptDataImpl* data =
i::ParserApi::PreParse(&stream, NULL, false);
CHECK(data->HasError());
@@ -360,7 +361,7 @@ TEST(Regress928) {
int first_function =
static_cast<int>(strstr(program, "function") - program);
- int first_lbrace = first_function + static_cast<int>(strlen("function () "));
+ int first_lbrace = first_function + i::StrLength("function () ");
CHECK_EQ('{', program[first_lbrace]);
i::FunctionEntry entry1 = data->GetFunctionEntry(first_lbrace);
CHECK(!entry1.is_valid());
@@ -368,7 +369,7 @@ TEST(Regress928) {
int second_function =
static_cast<int>(strstr(program + first_lbrace, "function") - program);
int second_lbrace =
- second_function + static_cast<int>(strlen("function () "));
+ second_function + i::StrLength("function () ");
CHECK_EQ('{', program[second_lbrace]);
i::FunctionEntry entry2 = data->GetFunctionEntry(second_lbrace);
CHECK(entry2.is_valid());
@@ -392,7 +393,7 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
- i::Utf8ToUC16CharacterStream stream(
+ i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(*program),
static_cast<unsigned>(kProgramSize));
i::CompleteParserRecorder log;
@@ -449,10 +450,10 @@ void TestCharacterStream(const char* ascii_source,
i::Handle<i::String> uc16_string(
FACTORY->NewExternalStringFromTwoByte(&resource));
- i::ExternalTwoByteStringUC16CharacterStream uc16_stream(
+ i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
- i::GenericStringUC16CharacterStream string_stream(ascii_string, start, end);
- i::Utf8ToUC16CharacterStream utf8_stream(
+ i::GenericStringUtf16CharacterStream string_stream(ascii_string, start, end);
+ i::Utf8ToUtf16CharacterStream utf8_stream(
reinterpret_cast<const i::byte*>(ascii_source), end);
utf8_stream.SeekForward(start);
@@ -575,12 +576,14 @@ TEST(Utf8CharacterStream) {
char buffer[kAllUtf8CharsSizeU];
unsigned cursor = 0;
for (int i = 0; i <= kMaxUC16Char; i++) {
- cursor += unibrow::Utf8::Encode(buffer + cursor, i);
+ cursor += unibrow::Utf8::Encode(buffer + cursor,
+ i,
+ unibrow::Utf16::kNoPreviousCharacter);
}
ASSERT(cursor == kAllUtf8CharsSizeU);
- i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
- kAllUtf8CharsSizeU);
+ i::Utf8ToUtf16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
+ kAllUtf8CharsSizeU);
for (int i = 0; i <= kMaxUC16Char; i++) {
CHECK_EQU(i, stream.pos());
int32_t c = stream.Advance();
@@ -610,7 +613,7 @@ TEST(Utf8CharacterStream) {
#undef CHECK_EQU
-void TestStreamScanner(i::UC16CharacterStream* stream,
+void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
@@ -633,8 +636,8 @@ TEST(StreamScanner) {
v8::V8::Initialize();
const char* str1 = "{ foo get for : */ <- \n\n /*foo*/ bib";
- i::Utf8ToUC16CharacterStream stream1(reinterpret_cast<const i::byte*>(str1),
- static_cast<unsigned>(strlen(str1)));
+ i::Utf8ToUtf16CharacterStream stream1(reinterpret_cast<const i::byte*>(str1),
+ static_cast<unsigned>(strlen(str1)));
i::Token::Value expectations1[] = {
i::Token::LBRACE,
i::Token::IDENTIFIER,
@@ -652,8 +655,8 @@ TEST(StreamScanner) {
TestStreamScanner(&stream1, expectations1, 0, 0);
const char* str2 = "case default const {THIS\nPART\nSKIPPED} do";
- i::Utf8ToUC16CharacterStream stream2(reinterpret_cast<const i::byte*>(str2),
- static_cast<unsigned>(strlen(str2)));
+ i::Utf8ToUtf16CharacterStream stream2(reinterpret_cast<const i::byte*>(str2),
+ static_cast<unsigned>(strlen(str2)));
i::Token::Value expectations2[] = {
i::Token::CASE,
i::Token::DEFAULT,
@@ -683,7 +686,7 @@ TEST(StreamScanner) {
for (int i = 0; i <= 4; i++) {
expectations3[6 - i] = i::Token::ILLEGAL;
expectations3[5 - i] = i::Token::EOS;
- i::Utf8ToUC16CharacterStream stream3(
+ i::Utf8ToUtf16CharacterStream stream3(
reinterpret_cast<const i::byte*>(str3),
static_cast<unsigned>(strlen(str3)));
TestStreamScanner(&stream3, expectations3, 1, 1 + i);
@@ -692,7 +695,7 @@ TEST(StreamScanner) {
void TestScanRegExp(const char* re_source, const char* expected) {
- i::Utf8ToUC16CharacterStream stream(
+ i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(re_source),
static_cast<unsigned>(strlen(re_source)));
i::Scanner scanner(i::Isolate::Current()->unicode_cache());
@@ -748,6 +751,67 @@ TEST(RegExpScanning) {
}
+static int Utf8LengthHelper(const char* s) {
+ int len = i::StrLength(s);
+ int character_length = len;
+ for (int i = 0; i < len; i++) {
+ unsigned char c = s[i];
+ int input_offset = 0;
+ int output_adjust = 0;
+ if (c > 0x7f) {
+ if (c < 0xc0) continue;
+ if (c >= 0xf0) {
+ if (c >= 0xf8) {
+ // 5 and 6 byte UTF-8 sequences turn into a kBadChar for each UTF-8
+ // byte.
+ continue; // Handle first UTF-8 byte.
+ }
+ if ((c & 7) == 0 && ((s[i + 1] & 0x30) == 0)) {
+ // This 4 byte sequence could have been coded as a 3 byte sequence.
+ // Record a single kBadChar for the first byte and continue.
+ continue;
+ }
+ input_offset = 3;
+ // 4 bytes of UTF-8 turn into 2 UTF-16 code units.
+ character_length -= 2;
+ } else if (c >= 0xe0) {
+ if ((c & 0xf) == 0 && ((s[i + 1] & 0x20) == 0)) {
+ // This 3 byte sequence could have been coded as a 2 byte sequence.
+ // Record a single kBadChar for the first byte and continue.
+ continue;
+ }
+ input_offset = 2;
+ // 3 bytes of UTF-8 turn into 1 UTF-16 code unit.
+ output_adjust = 2;
+ } else {
+ if ((c & 0x1e) == 0) {
+ // This 2 byte sequence could have been coded as a 1 byte sequence.
+ // Record a single kBadChar for the first byte and continue.
+ continue;
+ }
+ input_offset = 1;
+ // 2 bytes of UTF-8 turn into 1 UTF-16 code unit.
+ output_adjust = 1;
+ }
+ bool bad = false;
+ for (int j = 1; j <= input_offset; j++) {
+ if ((s[i + j] & 0xc0) != 0x80) {
+ // Bad UTF-8 sequence turns the first in the sequence into kBadChar,
+ // which is a single UTF-16 code unit.
+ bad = true;
+ break;
+ }
+ }
+ if (!bad) {
+ i += input_offset;
+ character_length -= output_adjust;
+ }
+ }
+ }
+ return character_length;
+}
+
+
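Utf8LengthHelper mirrors the scanner's recovery rules: a well-formed 1-3 byte sequence yields one UTF-16 code unit, a 4-byte sequence yields two, and every byte of a malformed or overlong sequence counts as one unit (the scanner substitutes kBadChar per byte). A compressed restatement of the lead-byte classes (a simplification; the overlong checks on the second byte are omitted here):

    // Bytes consumed and UTF-16 units produced per UTF-8 lead byte.
    function leadByteClass(b) {
      if (b < 0x80) return { bytes: 1, units: 1 };  // ASCII
      if (b < 0xc0) return { bytes: 1, units: 1 };  // stray trail byte: kBadChar
      if (b < 0xe0) return { bytes: 2, units: 1 };
      if (b < 0xf0) return { bytes: 3, units: 1 };
      if (b < 0xf8) return { bytes: 4, units: 2 };  // becomes a surrogate pair
      return { bytes: 1, units: 1 };                // 5/6 byte forms: kBadChar
    }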
TEST(ScopePositions) {
// Test the parser for correctly setting the start and end positions
// of a scope. We check the scope positions of exactly one scope
@@ -835,6 +899,91 @@ TEST(ScopePositions) {
{ " for ", "(let x in {})\n"
" statement;", "\n"
" more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ // Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
+ // the preparser off in terms of byte offsets.
+ // 6 byte encoding.
+ { " 'foo\355\240\201\355\260\211';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // 4 byte encoding.
+ { " 'foo\360\220\220\212';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // 3 byte encoding of \u0fff.
+ { " 'foo\340\277\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 6 byte encoding with missing last byte.
+ { " 'foo\355\240\201\355\211';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 3 byte encoding of \u0fff with missing last byte.
+ { " 'foo\340\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 3 byte encoding of \u0fff with missing 2 last bytes.
+ { " 'foo\340';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
+ { " 'foo\340\203\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 3 byte encoding of \u007f should be a 2 byte encoding.
+ { " 'foo\340\201\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Unpaired lead surrogate.
+ { " 'foo\355\240\201';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Unpaired lead surrogate where following code point is a 3 byte sequence.
+ { " 'foo\355\240\201\340\277\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Unpaired lead surrogate where following code point is a 4 byte encoding
+ // of a trail surrogate.
+ { " 'foo\355\240\201\360\215\260\211';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Unpaired trail surrogate.
+ { " 'foo\355\260\211';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // 2 byte encoding of \u00ff.
+ { " 'foo\303\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 2 byte encoding of \u00ff with missing last byte.
+ { " 'foo\303';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Broken 2 byte encoding of \u007f should be a 1 byte encoding.
+ { " 'foo\301\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Illegal 5 byte encoding.
+ { " 'foo\370\277\277\277\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Illegal 6 byte encoding.
+ { " 'foo\374\277\277\277\277\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Illegal 0xfe byte
+ { " 'foo\376\277\277\277\277\277\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ // Illegal 0xff byte
+ { " 'foo\377\277\277\277\277\277\277\277';\n"
+ " (function fun", "(a,b) { infunction; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ { " 'foo';\n"
+ " (function fun", "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ { " 'foo';\n"
+ " (function fun", "(a,b) { 'bar\360\220\220\214'; }", ")();",
+ i::FUNCTION_SCOPE, i::CLASSIC_MODE },
{ NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
};
@@ -848,20 +997,24 @@ TEST(ScopePositions) {
i::FLAG_harmony_scoping = true;
for (int i = 0; source_data[i].outer_prefix; i++) {
- int kPrefixLen = i::StrLength(source_data[i].outer_prefix);
- int kInnerLen = i::StrLength(source_data[i].inner_source);
- int kSuffixLen = i::StrLength(source_data[i].outer_suffix);
+ int kPrefixLen = Utf8LengthHelper(source_data[i].outer_prefix);
+ int kInnerLen = Utf8LengthHelper(source_data[i].inner_source);
+ int kSuffixLen = Utf8LengthHelper(source_data[i].outer_suffix);
+ int kPrefixByteLen = i::StrLength(source_data[i].outer_prefix);
+ int kInnerByteLen = i::StrLength(source_data[i].inner_source);
+ int kSuffixByteLen = i::StrLength(source_data[i].outer_suffix);
int kProgramSize = kPrefixLen + kInnerLen + kSuffixLen;
- i::Vector<char> program = i::Vector<char>::New(kProgramSize + 1);
- int length = i::OS::SNPrintF(program, "%s%s%s",
- source_data[i].outer_prefix,
- source_data[i].inner_source,
- source_data[i].outer_suffix);
- CHECK(length == kProgramSize);
+ int kProgramByteSize = kPrefixByteLen + kInnerByteLen + kSuffixByteLen;
+ i::Vector<char> program = i::Vector<char>::New(kProgramByteSize + 1);
+ i::OS::SNPrintF(program, "%s%s%s",
+ source_data[i].outer_prefix,
+ source_data[i].inner_source,
+ source_data[i].outer_suffix);
// Parse program source.
i::Handle<i::String> source(
- FACTORY->NewStringFromAscii(i::CStrVector(program.start())));
+ FACTORY->NewStringFromUtf8(i::CStrVector(program.start())));
+ CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = FACTORY->NewScript(source);
i::Parser parser(script, i::kAllowLazy | i::EXTENDED_MODE, NULL, NULL);
i::CompilationInfo info(script);
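The rewritten loop now tracks two sizes per fragment: the byte length, used to size the buffer, and the UTF-16 length, which is what source->length() reports once the program is built with NewStringFromUtf8. The distinction, using the utf8Length sketch from earlier:

    var s = 'foo' + String.fromCharCode(0xd801, 0xdc12);  // "foo" + U+10412
    s.length;       // 5 UTF-16 code units
    utf8Length(s);  // 7 UTF-8 bytes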
@@ -894,7 +1047,7 @@ void TestParserSync(i::Handle<i::String> source, int flags) {
// Preparse the data.
i::CompleteParserRecorder log;
i::Scanner scanner(i::Isolate::Current()->unicode_cache());
- i::GenericStringUC16CharacterStream stream(source, 0, source->length());
+ i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner.SetHarmonyScoping(harmony_scoping);
scanner.Initialize(&stream);
v8::preparser::PreParser::PreParseResult result =
diff --git a/deps/v8/test/mjsunit/compiler/inline-arguments.js b/deps/v8/test/mjsunit/compiler/inline-arguments.js
index 532fc26a17..b6adf7f6cc 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arguments.js
@@ -27,11 +27,89 @@
// Flags: --allow-natives-syntax
-// Test inlining functions that use arguments.
-function f() { return g(1, 2, 3); }
+function A() {
+}
-function g(x, y, z) { return %_ArgumentsLength(); }
+A.prototype.X = function (a, b, c) {
+ assertTrue(this instanceof A);
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+};
-for (var i = 0; i < 5; ++i) f();
-%OptimizeFunctionOnNextCall(f);
-assertEquals(3, f());
+A.prototype.Y = function () {
+ this.X.apply(this, arguments);
+};
+
+A.prototype.Z = function () {
+ this.Y(1,2,3);
+};
+
+var a = new A();
+a.Z(4,5,6);
+a.Z(4,5,6);
+%OptimizeFunctionOnNextCall(a.Z);
+a.Z(4,5,6);
+A.prototype.X.apply = function (receiver, args) {
+ return Function.prototype.apply.call(this, receiver, args);
+};
+a.Z(4,5,6);
+
+
+// Ensure that HArgumentsObject is inserted in the correct place
+// and dominates all of its uses.
+function F1() { }
+function F2() { F1.apply(this, arguments); }
+function F3(x, y) {
+ if (x) {
+ F2(y);
+ }
+}
+
+function F31() {
+ return F1.apply(this, arguments);
+}
+
+function F4() {
+ F3(true, false);
+ return F31(1);
+}
+
+F4(1);
+F4(1);
+F4(1);
+%OptimizeFunctionOnNextCall(F4);
+F4(1);
+
+
+// Test correct adaptation of arguments.
+// Strict mode prevents the arguments object from aliasing parameters.
+(function () {
+ "use strict";
+
+ function G2() {
+ assertArrayEquals([1,2], arguments);
+ }
+
+ function G4() {
+ assertArrayEquals([1,2,3,4], arguments);
+ }
+
+ function adapt2to4(a, b, c, d) {
+ G2.apply(this, arguments);
+ }
+
+ function adapt4to2(a, b) {
+ G4.apply(this, arguments);
+ }
+
+ function test_adaptation() {
+ adapt2to4(1, 2);
+ adapt4to2(1, 2, 3, 4);
+ }
+
+ test_adaptation();
+ test_adaptation();
+ %OptimizeFunctionOnNextCall(test_adaptation);
+ test_adaptation();
+})();
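The adaptation under test: the arguments object always reflects what the call site actually passed, regardless of the declared parameter count, and apply must forward exactly that. For instance:

    function declared4(a, b, c, d) { return arguments.length; }
    function declared2(a, b) { return arguments.length; }
    declared4(1, 2);        // 2, the missing c and d are not materialized
    declared2(1, 2, 3, 4);  // 4, the extra arguments are still present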
diff --git a/deps/v8/test/mjsunit/debug-set-script-source.js b/deps/v8/test/mjsunit/debug-set-script-source.js
new file mode 100644
index 0000000000..34ae8488a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-set-script-source.js
@@ -0,0 +1,64 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var script_number = 0;
+var script_names = [];
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.BeforeCompile) {
+ event_data.script().setSource(event_data.script().source() +
+ " //@ sourceURL=proper_location_" + (++script_number));
+ } else if (event == Debug.DebugEvent.AfterCompile) {
+ try {
+ event_data.script().setSource("a=1 //@ sourceURL=wrong_location");
+ } catch(e) {
+ exception = e;
+ }
+ script_names.push(event_data.script().name());
+ }
+};
+
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Compile different sources.
+eval('a=1');
+eval('(function(){})');
+
+assertEquals(2, script_names.length);
+assertEquals("proper_location_1", script_names[0]);
+assertEquals("proper_location_2", script_names[1]);
+
+assertEquals("illegal access", exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-stepin-function-call.js b/deps/v8/test/mjsunit/debug-stepin-function-call.js
index 385fcb2f8b..3b5240c933 100644
--- a/deps/v8/test/mjsunit/debug-stepin-function-call.js
+++ b/deps/v8/test/mjsunit/debug-stepin-function-call.js
@@ -135,8 +135,15 @@ function apply4() {
var yetAnotherLocal = 10;
}
+// Test step into bound function.
+function bind1() {
+ var bound = g.bind(null, 3);
+ debugger;
+ bound();
+}
+
var testFunctions =
- [call1, call2, call3, call4, apply1, apply2, apply3, apply4];
+ [call1, call2, call3, call4, apply1, apply2, apply3, apply4, bind1];
for (var i = 0; i < testFunctions.length; i++) {
state = 0;
@@ -145,5 +152,13 @@ for (var i = 0; i < testFunctions.length; i++) {
assertEquals(3, state);
}
+// Test global bound function.
+state = 0;
+var globalBound = g.bind(null, 3);
+debugger;
+globalBound();
+assertNull(exception);
+assertEquals(3, state);
+
// Get rid of the debug event listener.
Debug.setListener(null); \ No newline at end of file
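Bound functions insert a hidden frame between the call site and the target, and the new cases check that stepping passes through it into g. The binding itself behaves as sketched below (g's signature here is an assumption; the test's real g is outside this hunk):

    function g(a) { return a; }
    var bound = g.bind(null, 3);  // fixes the receiver and first argument
    bound();                      // 3; step-in should land inside g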
diff --git a/deps/v8/test/mjsunit/getter-in-value-prototype.js b/deps/v8/test/mjsunit/getter-in-value-prototype.js
index b55320ac5b..abe2cb1934 100644
--- a/deps/v8/test/mjsunit/getter-in-value-prototype.js
+++ b/deps/v8/test/mjsunit/getter-in-value-prototype.js
@@ -31,5 +31,5 @@
// JSObject.
String.prototype.__defineGetter__('x', function() { return this; });
-assertEquals('asdf', 'asdf'.x);
+assertEquals(Object('asdf'), 'asdf'.x);
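The expectation changes because a classic-mode getter invoked on a primitive receives the wrapper object as this, so the getter above returns a String object rather than the primitive; the assertion compares against Object('asdf') accordingly. Compare:

    String.prototype.__defineGetter__('kind', function() { return typeof this; });
    'asdf'.kind;   // "object" in classic mode: this is the String wrapper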
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index 50c8613b63..8d8f83996e 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -2257,3 +2257,22 @@ TestIsEnumerableThrow(Proxy.create({
return function(k) { throw "myexn" }
}
}))
+
+
+
+// Constructor functions with proxy prototypes.
+
+function TestConstructorWithProxyPrototype() {
+ TestWithProxies(TestConstructorWithProxyPrototype2, {})
+}
+
+function TestConstructorWithProxyPrototype2(create, handler) {
+ function C() {};
+ C.prototype = create(handler);
+
+ var o = new C;
+ assertSame(C.prototype, o.__proto__);
+ assertSame(C.prototype, Object.getPrototypeOf(o));
+}
+
+TestConstructorWithProxyPrototype();
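A trimmed-down version of what the new test pins, using the Harmony proxies API of the time (Proxy.create, behind --harmony-proxies; the empty handler mirrors the test's argument):

    function C() {}
    C.prototype = Proxy.create({});  // a proxy as the prototype object
    var o = new C();
    Object.getPrototypeOf(o) === C.prototype;  // true; instances still
                                               // chain to the proxy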
diff --git a/deps/v8/test/mjsunit/pixel-array-rounding.js b/deps/v8/test/mjsunit/pixel-array-rounding.js
new file mode 100644
index 0000000000..ef5a10bdff
--- /dev/null
+++ b/deps/v8/test/mjsunit/pixel-array-rounding.js
@@ -0,0 +1,44 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var pixels = new PixelArray(8);
+
+function f() {
+ for (var i = 0; i < 8; i++) {
+ pixels[i] = (i * 1.1);
+ }
+ return pixels[1] + pixels[6];
+}
+
+f();
+f();
+assertEquals(6, pixels[5]);
+%OptimizeFunctionOnNextCall(f);
+f();
+assertEquals(6, pixels[5]);
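The value being pinned: pixel array stores clamp to the range [0, 255] and round to the nearest integer, so 5 * 1.1 = 5.5 is stored as 6, and the assertion is repeated after optimization to catch rounding differences in Crankshaft-generated code. Assumed store semantics:

    var px = new PixelArray(3);
    px[0] = 5.5;   // stored as 6 (rounded)
    px[1] = -3;    // stored as 0 (clamped)
    px[2] = 300;   // stored as 255 (clamped)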
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index 76fa44be9c..ec82c96e09 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -701,3 +701,7 @@ assertThrows("RegExp('(*)')");
assertThrows("RegExp('(?:*)')");
assertThrows("RegExp('(?=*)')");
assertThrows("RegExp('(?!*)')");
+
+// Test trimmed regular expression for RegExp.test().
+assertTrue(/.*abc/.test("abc"));
+assertFalse(/.*\d+/.test("q"));
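The new asserts cover an optimization that trims a leading .* when only a boolean answer is needed: under test(), /.*abc/ accepts exactly the same inputs as /abc/, and the second case checks the trimming never manufactures a match:

    /.*abc/.test("xxabc");  // true, same answer as /abc/.test("xxabc")
    /.*\d+/.test("q");      // false, .* alone must not satisfy the trimmed form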
diff --git a/deps/v8/test/mjsunit/regress/regress-115452.js b/deps/v8/test/mjsunit/regress/regress-115452.js
new file mode 100644
index 0000000000..7e424ed88b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-115452.js
@@ -0,0 +1,48 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that a function declaration cannot overwrite a read-only property.
+
+print(0)
+function foobl() {}
+assertTrue(typeof this.foobl == "function");
+assertTrue(Object.getOwnPropertyDescriptor(this, "foobl").writable);
+
+print(1)
+Object.defineProperty(this, "foobl", {value: 1, writable: false});
+assertSame(1, this.foobl);
+assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
+
+print(2)
+eval("function foobl() {}");
+assertSame(1, this.foobl);
+assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
+
+print(3)
+eval("function foobl() {}");
+assertSame(1, this.foobl);
+assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
diff --git a/deps/v8/test/mjsunit/regress/regress-117794.js b/deps/v8/test/mjsunit/regress/regress-117794.js
new file mode 100644
index 0000000000..5e11b40035
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-117794.js
@@ -0,0 +1,57 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Loads specialized to be from the global object should not omit the
+// smi check on the receiver. The code below should not crash.
+
+print = function() {}
+
+function constructor() {};
+
+function assertHasOwnProperties(object, limit) {
+ for (var i = 0; i < limit; i++) { }
+}
+
+try {
+ Object.keys();
+} catch(exc2) {
+ print(exc2.stack);
+}
+
+var x1 = new Object();
+
+try {
+ new Function("A Man Called Horse", x1.d);
+} catch(exc3) {
+ print(exc3.stack);
+}
+
+try {
+ (-(true)).toPrecision(0x30, 'lib1-f1');
+} catch(exc1) {
+ print(exc1.stack);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-119925.js b/deps/v8/test/mjsunit/regress/regress-119925.js
new file mode 100644
index 0000000000..67127548c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-119925.js
@@ -0,0 +1,34 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the throw is not inlined if object literals cannot be
+// inlined.
+Array.prototype.__proto__ = { 77e4 : null };
+function continueWithinLoop() {
+ for (var key in [(1.2)]) { }
+};
+continueWithinLoop();
diff --git a/deps/v8/test/mjsunit/regress/regress-1624-strict.js b/deps/v8/test/mjsunit/regress/regress-1624-strict.js
new file mode 100644
index 0000000000..8bc58d5abb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1624-strict.js
@@ -0,0 +1,140 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that global eval calls of strict code (independent from whether being
+// direct or indirect) have their own lexical and variable environment.
+
+"use strict";
+var evil = eval;
+
+// Test global direct strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+eval('"use strict"; var no_touch = 1;');
+assertSame(0, no_touch);
+
+// Test global indirect strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+evil('"use strict"; var no_touch = 2;');
+assertSame(0, no_touch);
+
+// Test global direct non-strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+eval('var no_touch = 3;');
+assertSame(0, no_touch);
+
+// Test global indirect non-strict eval in strict script.
+// Expects global environment.
+var no_touch = 0;
+evil('var no_touch = 4;');
+assertSame(4, no_touch);
+
+// Test non-global direct strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ eval('"use strict"; var no_touch = 5;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ evil('"use strict"; var no_touch = 6;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ eval('var no_touch = 7;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in strict script.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ evil('var no_touch = 8;');
+ assertSame(0, no_touch);
+})()
+assertSame(8, no_touch);
+
+// Test non-global direct strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ eval('"use strict"; var no_touch = 9;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ evil('"use strict"; var no_touch = 10;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ eval('var no_touch = 11;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in strict script.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ evil('var no_touch = 12;');
+ assertSame(0, no_touch);
+})()
+assertSame(12, no_touch);
diff --git a/deps/v8/test/mjsunit/regress/regress-1624.js b/deps/v8/test/mjsunit/regress/regress-1624.js
new file mode 100644
index 0000000000..987e036d70
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1624.js
@@ -0,0 +1,139 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that global eval calls of strict code (independent from whether being
+// direct or indirect) have their own lexical and variable environment.
+
+var evil = eval;
+
+// Test global direct strict eval.
+// Expects new environment.
+var no_touch = 0;
+eval('"use strict"; var no_touch = 1;');
+assertSame(0, no_touch);
+
+// Test global indirect strict eval.
+// Expects new environment.
+var no_touch = 0;
+evil('"use strict"; var no_touch = 2;');
+assertSame(0, no_touch);
+
+// Test global direct non-strict eval.
+// Expects global environment.
+var no_touch = 0;
+eval('var no_touch = 3;');
+assertSame(3, no_touch);
+
+// Test global indirect non-strict eval.
+// Expects global environment.
+var no_touch = 0;
+evil('var no_touch = 4;');
+assertSame(4, no_touch);
+
+// Test non-global direct strict eval in non-strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ eval('"use strict"; var no_touch = 5;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in non-strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ evil('"use strict"; var no_touch = 6;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in non-strict function.
+// Expects function environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ eval('var no_touch = 7;');
+ assertSame(7, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in non-strict function.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+ var no_touch = 0;
+ evil('var no_touch = 8;');
+ assertSame(0, no_touch);
+})()
+assertSame(8, no_touch);
+
+// Test non-global direct strict eval in strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ eval('"use strict"; var no_touch = 9;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ evil('"use strict"; var no_touch = 10;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ eval('var no_touch = 11;');
+ assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in strict function.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+ "use strict";
+ var no_touch = 0;
+ evil('var no_touch = 12;');
+ assertSame(0, no_touch);
+})()
+assertSame(12, no_touch);
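A hedged extrapolation of the same rule, not exercised by the file above:
because strict eval code gets a fresh variable environment, its function
declarations are confined to the eval as well (again assuming the mjsunit
assertSame helper):

  eval('"use strict"; function leak() { return 1; }');
  assertSame("undefined", typeof leak);   // the declaration did not escape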
diff --git a/deps/v8/test/mjsunit/regress/regress-1973.js b/deps/v8/test/mjsunit/regress/regress-1973.js
new file mode 100644
index 0000000000..8708bf1275
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1973.js
@@ -0,0 +1,52 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that getters and setters receive an unwrapped this value in strict
+// mode and a wrapped this value in non-strict mode.
+
+function TestAccessorWrapping(primitive) {
+ var prototype = Object.getPrototypeOf(Object(primitive))
+ // Check that strict mode passes unwrapped this value.
+ var strict_type = typeof primitive;
+ Object.defineProperty(prototype, "strict", {
+ get: function() { "use strict"; assertSame(strict_type, typeof this); },
+ set: function() { "use strict"; assertSame(strict_type, typeof this); }
+ });
+ primitive.strict = primitive.strict;
+ // Check that non-strict mode passes wrapped this value.
+ var sloppy_type = typeof Object(primitive);
+ Object.defineProperty(prototype, "sloppy", {
+ get: function() { assertSame(sloppy_type, typeof this); },
+ set: function() { assertSame(sloppy_type, typeof this); }
+ });
+ primitive.sloppy = primitive.sloppy;
+}
+
+TestAccessorWrapping(true);
+TestAccessorWrapping(0);
+TestAccessorWrapping({});
+TestAccessorWrapping("");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-119926.js b/deps/v8/test/mjsunit/regress/regress-crbug-119926.js
new file mode 100644
index 0000000000..26b84fad7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-119926.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that array elements don't break upon garbage collection.
+
+var a = new Array(500);
+for (var i = 0; i < 500000; i++) {
+ a[i] = new Object();
+}
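The test passes simply by not crashing: growing a 500-element array to
500000 objects forces the elements backing store to be reallocated while the
allocations keep triggering garbage collections. A variant that also
spot-checks the survivors might look like this (an illustrative extension,
not part of the regression test):

  for (var i = 0; i < 500000; i += 12345) {
    assertSame("object", typeof a[i]);   // elements survived the collections
  }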
diff --git a/deps/v8/test/mjsunit/regress/regress-sqrt.js b/deps/v8/test/mjsunit/regress/regress-sqrt.js
new file mode 100644
index 0000000000..f2a7e55242
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-sqrt.js
@@ -0,0 +1,47 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Check that Math.sqrt returns the same value whether or not the calling
+// code has been optimized.
+
+function f(x) {
+ return Math.sqrt(x);
+}
+
+var x = 7.0506280066499245e-233;
+
+var a = f(x);
+
+f(0.1);
+f(0.2);
+%OptimizeFunctionOnNextCall(f);
+
+var b = f(x);
+
+assertEquals(a, b);
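%OptimizeFunctionOnNextCall is a V8-internal runtime function, which is why
the file carries the --allow-natives-syntax flag line: without it the %
prefix is a SyntaxError. The generic shape of the pattern, as a sketch (any
function body works; the warm-up calls exist to collect type feedback):

  function g(n) { return n + 1; }
  g(1); g(2);                       // warm up: collect type feedback
  %OptimizeFunctionOnNextCall(g);   // request optimized code for g
  g(3);                             // this call runs the optimized version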
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index cc2925d660..e64959acfc 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -69,6 +69,9 @@ js1_5/Array/regress-465980-02: SKIP
ecma_3/Date/15.9.3.2-1: SKIP
js1_2/function/Number: SKIP
+# TODO(2018): Temporarily allow timeout in debug mode.
+js1_5/GC/regress-203278-2: PASS || TIMEOUT if $mode == debug
+
##################### SLOW TESTS #####################
# This takes a long time to run (~100 seconds). It should only be run
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 1a8a8dc7dd..67607fff9b 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -33,9 +33,6 @@ def FAIL_OK = FAIL, OKAY
# '__proto__' should be treated as a normal property in JSON.
S15.12.2_A1: FAIL
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1624
-S10.4.2.1_A1: FAIL
-
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1475
15.2.3.6-4-405: FAIL
15.2.3.6-4-410: FAIL
@@ -73,6 +70,19 @@ S7.8.4_A7.2_T6: FAIL_OK
S8.5_A2.2: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
S8.5_A2.1: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
+############################ INVALID TESTS #############################
+
+# The reference value calculated by Test262 is incorrect if these tests run
+# in PST/PDT between the first Sunday in March and the first Sunday in April:
+# the DST switch moved in 2007, but Test262 assumes the 2000 rules (see below).
+# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
+S15.9.3.1_A5_T1: PASS || FAIL_OK
+S15.9.3.1_A5_T2: PASS || FAIL_OK
+S15.9.3.1_A5_T3: PASS || FAIL_OK
+S15.9.3.1_A5_T4: PASS || FAIL_OK
+S15.9.3.1_A5_T5: PASS || FAIL_OK
+S15.9.3.1_A5_T6: PASS || FAIL_OK
+
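Illustrative only, since the observed offsets depend on the host timezone
database (print is the d8 shell builtin): under post-2007 US Pacific rules
mid-March is already PDT, while the pre-2007 rules Test262 assumes would
still put it in PST.

  var inWindow = new Date(2012, 2, 20);     // March 20
  var afterWindow = new Date(2012, 3, 10);  // April 10: PDT under both rules
  print(inWindow.getTimezoneOffset());      // 420 (PDT) with current tzdata
  print(afterWindow.getTimezoneOffset());   // 420 under either rule set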
############################ SKIPPED TESTS #############################
# These tests take a looong time to run in debug mode.
diff --git a/deps/v8/tools/check-static-initializers.sh b/deps/v8/tools/check-static-initializers.sh
new file mode 100644
index 0000000000..18add3a63a
--- /dev/null
+++ b/deps/v8/tools/check-static-initializers.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Checks that the number of compilation units in d8 that contain at least
+# one static initializer matches the expected count defined below.
+# Note that the project must be built with SCons before running this script.
+
+# Allow:
+# - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE
+# - _GLOBAL__I__ZN2v810LineEditor6first_E
+expected_static_init_count=2
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+d8="${v8_root}/d8"
+
+if [ ! -f "$d8" ]; then
+ echo "Please build the project with SCons."
+ exit 1
+fi
+
+static_inits=$(nm "$d8" | grep _GLOBAL__I | awk '{ print $NF; }')
+
+static_init_count=$(echo "$static_inits" | wc -l)
+
+if [ $static_init_count -gt $expected_static_init_count ]; then
+ echo "Too many static initializers."
+ echo "$static_inits"
+ exit 1
+fi
diff --git a/deps/v8/tools/common-includes.sh b/deps/v8/tools/common-includes.sh
index 98206899f4..8f0e78b601 100644
--- a/deps/v8/tools/common-includes.sh
+++ b/deps/v8/tools/common-includes.sh
@@ -36,6 +36,7 @@ TEMP_BRANCH=$BRANCHNAME-temporary-branch-created-by-script
VERSION_FILE="src/version.cc"
CHANGELOG_ENTRY_FILE="$PERSISTFILE_BASENAME-changelog-entry"
PATCH_FILE="$PERSISTFILE_BASENAME-patch"
+PATCH_OUTPUT_FILE="$PERSISTFILE_BASENAME-patch-output"
COMMITMSG_FILE="$PERSISTFILE_BASENAME-commitmsg"
TOUCHED_FILES_FILE="$PERSISTFILE_BASENAME-touched-files"
TRUNK_REVISION_FILE="$PERSISTFILE_BASENAME-trunkrevision"
@@ -59,7 +60,7 @@ confirm() {
}
delete_branch() {
- local MATCH=$(git branch | grep $1 | awk '{print $NF}' )
+ local MATCH=$(git branch | grep "$1" | awk '{print $NF}' | grep -x $1)
if [ "$MATCH" == "$1" ] ; then
confirm "Branch $1 exists, do you want to delete it?"
if [ $? -eq 0 ] ; then
@@ -174,8 +175,10 @@ the uploaded CL."
# Takes a file containing the patch to apply as first argument.
apply_patch() {
- patch -p1 < "$1" | tee >(awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
- [[ $? -eq 0 ]] || die "Applying the patch failed."
+ patch -p1 < "$1" > "$PATCH_OUTPUT_FILE" || \
+ { cat "$PATCH_OUTPUT_FILE" && die "Applying the patch failed."; }
+ tee < "$PATCH_OUTPUT_FILE" >(awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
+ rm "$PATCH_OUTPUT_FILE"
}
stage_files() {
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index b8769510c8..764789a8f4 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -40,10 +40,16 @@
'toolsets': ['target'],
}],
['v8_use_snapshot=="true"', {
- 'dependencies': ['v8_snapshot'],
+ # The dependency on v8_base should come from a transitive
+ # dependency; however, the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a, so it is listed explicitly.
+ 'dependencies': ['v8_base', 'v8_snapshot'],
},
{
- 'dependencies': ['v8_nosnapshot'],
+ # The dependency on v8_base should come from a transitive
+ # dependency; however, the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a, so it is listed explicitly.
+ 'dependencies': ['v8_base', 'v8_nosnapshot'],
}],
['component=="shared_library"', {
'type': '<(component)',
@@ -241,6 +247,7 @@
'../../src/assembler.h',
'../../src/ast.cc',
'../../src/ast.h',
+ '../../src/atomicops.h',
'../../src/atomicops_internals_x86_gcc.cc',
'../../src/bignum.cc',
'../../src/bignum.h',
@@ -353,6 +360,7 @@
'../../src/jsregexp.h',
'../../src/isolate.cc',
'../../src/isolate.h',
+ '../../src/lazy-instance.h',
'../../src/list-inl.h',
'../../src/list.h',
'../../src/lithium.cc',
@@ -383,6 +391,8 @@
'../../src/objects-visiting.h',
'../../src/objects.cc',
'../../src/objects.h',
+ '../../src/once.cc',
+ '../../src/once.h',
'../../src/parser.cc',
'../../src/parser.h',
'../../src/platform-tls-mac.h',
@@ -904,6 +914,8 @@
'../../include/v8stdint.h',
'../../src/allocation.cc',
'../../src/allocation.h',
+ '../../src/atomicops.h',
+ '../../src/atomicops_internals_x86_gcc.cc',
'../../src/bignum.cc',
'../../src/bignum.h',
'../../src/bignum-dtoa.cc',
@@ -929,6 +941,8 @@
'../../src/hashmap.h',
'../../src/list-inl.h',
'../../src/list.h',
+ '../../src/once.cc',
+ '../../src/once.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',
diff --git a/deps/v8/tools/merge-to-branch.sh b/deps/v8/tools/merge-to-branch.sh
index 484558cfad..49bf3e4489 100644
--- a/deps/v8/tools/merge-to-branch.sh
+++ b/deps/v8/tools/merge-to-branch.sh
@@ -48,6 +48,7 @@ to other branches, including trunk.
OPTIONS:
-h Show this message
-s Specify the step where to start work. Default: 0.
+ -p Specify a patch file to apply as part of the merge
EOF
}
@@ -61,17 +62,19 @@ restore_patch_commit_hashes() {
restore_patch_commit_hashes_if_unset() {
[[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && restore_patch_commit_hashes
- [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && \
+ [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && [[ -z "$EXTRA_PATCH" ]] && \
die "Variable PATCH_COMMIT_HASHES could not be restored."
}
########## Option parsing
-while getopts ":hs:f" OPTION ; do
+while getopts ":hs:fp:" OPTION ; do
case $OPTION in
h) usage
exit 0
;;
+ p) EXTRA_PATCH=$OPTARG
+ ;;
f) rm -f "$ALREADY_MERGING_SENTINEL_FILE"
;;
s) START_STEP=$OPTARG
@@ -88,13 +91,16 @@ shift $OPTION_COUNT
########## Regular workflow
# If there is a merge in progress, abort.
-[[ -e "$ALREADY_MERGING_SENTINEL_FILE" ]] && [[ -z "$START_STEP" ]] \
+[[ -e "$ALREADY_MERGING_SENTINEL_FILE" ]] && [[ $START_STEP -eq 0 ]] \
&& die "A merge is already in progress"
touch "$ALREADY_MERGING_SENTINEL_FILE"
initial_environment_checks
if [ $START_STEP -le $CURRENT_STEP ] ; then
+ if [ ${#@} -lt 2 ] && [ -z "$EXTRA_PATCH" ] ; then
+ die "Either a patch file or revision numbers must be specified"
+ fi
echo ">>> Step $CURRENT_STEP: Preparation"
MERGE_TO_BRANCH=$1
[[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
@@ -121,11 +127,15 @@ revisions associated with the patches."
[[ -n "$NEXT_HASH" ]] \
|| die "Cannot determine git hash for r$REVISION"
PATCH_COMMIT_HASHES[$current]="$NEXT_HASH"
- [[ -n "$NEW_COMMIT_MSG" ]] && NEW_COMMIT_MSG="$NEW_COMMIT_MSG,"
- NEW_COMMIT_MSG="$NEW_COMMIT_MSG r$REVISION"
+ [[ -n "$REVISION_LIST" ]] && REVISION_LIST="$REVISION_LIST,"
+ REVISION_LIST="$REVISION_LIST r$REVISION"
let current+=1
done
- NEW_COMMIT_MSG="Merged$NEW_COMMIT_MSG into $MERGE_TO_BRANCH branch."
+ if [ -z "$REVISION_LIST" ] ; then
+ NEW_COMMIT_MSG="Applied patch to $MERGE_TO_BRANCH branch."
+ else
+ NEW_COMMIT_MSG="Merged$REVISION_LIST into $MERGE_TO_BRANCH branch."
+ fi;
echo "$NEW_COMMIT_MSG" > $COMMITMSG_FILE
echo "" >> $COMMITMSG_FILE
@@ -145,6 +155,7 @@ revisions associated with the patches."
echo "BUG=$BUG_AGGREGATE" >> $COMMITMSG_FILE
fi
persist "NEW_COMMIT_MSG"
+ persist "REVISION_LIST"
persist_patch_commit_hashes
fi
@@ -159,6 +170,9 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
git log -1 -p $HASH > "$TEMPORARY_PATCH_FILE"
apply_patch "$TEMPORARY_PATCH_FILE"
done
+ if [ -n "$EXTRA_PATCH" ] ; then
+ apply_patch "$EXTRA_PATCH"
+ fi
stage_files
fi
@@ -234,10 +248,20 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
https://v8.googlecode.com/svn/$TO_URL \
https://v8.googlecode.com/svn/tags/$NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH \
-m "Tagging version $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+ persist "TO_URL"
fi
let CURRENT_STEP+=1
if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Cleanup."
+ restore_if_unset "SVN_REVISION"
+ restore_if_unset "TO_URL"
+ restore_if_unset "REVISION_LIST"
+ restore_version_if_unset "NEW"
common_cleanup
+ echo "*** SUMMARY ***"
+ echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+ echo "branch: $TO_URL"
+ echo "svn revision: $SVN_REVISION"
+ [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
fi
diff --git a/deps/v8/tools/push-to-trunk.sh b/deps/v8/tools/push-to-trunk.sh
index c1f8e78594..3fb5b34ed3 100755
--- a/deps/v8/tools/push-to-trunk.sh
+++ b/deps/v8/tools/push-to-trunk.sh
@@ -332,6 +332,9 @@ if [ -n "$CHROME_PATH" ] ; then
# Check for a clean workdir.
[[ -z "$(git status -s -uno)" ]] \
|| die "Workspace is not clean. Please commit or undo your changes."
+ # Assert that the DEPS file is there.
+ [[ -w "DEPS" ]] || die "DEPS file not present or not writable; \
+current directory is: $(pwd)."
fi
let CURRENT_STEP+=1
@@ -348,7 +351,7 @@ if [ -n "$CHROME_PATH" ] ; then
if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Create and upload CL."
# Patch DEPS file.
- sed -e "/\"v8_revision\": /s/\"[0-9]+\"/\"$TRUNK_REVISION\"/" \
+ sed -r -e "/\"v8_revision\": /s/\"[0-9]+\"/\"$TRUNK_REVISION\"/" \
-i DEPS
restore_version_if_unset
echo -n "Please enter the email address of a reviewer for the roll CL: "
@@ -356,9 +359,9 @@ if [ -n "$CHROME_PATH" ] ; then
git commit -am "Update V8 to version $MAJOR.$MINOR.$BUILD.
TBR=$REVIEWER" || die "'git commit' failed."
- git cl upload --send-mail --use-commit-queue \
+ git cl upload --send-mail \
|| die "'git cl upload' failed, please try again."
- echo "CL uploaded and sent to commit queue."
+ echo "CL uploaded."
fi
let CURRENT_STEP+=1
diff --git a/deps/v8/tools/test-wrapper-gypbuild.py b/deps/v8/tools/test-wrapper-gypbuild.py
index 465ca88c3d..fda4105a98 100755
--- a/deps/v8/tools/test-wrapper-gypbuild.py
+++ b/deps/v8/tools/test-wrapper-gypbuild.py
@@ -197,9 +197,9 @@ def PassOnOptions(options):
if options.crankshaft:
result += ['--crankshaft']
if options.shard_count != 1:
- result += ['--shard_count=%s' % options.shard_count]
+ result += ['--shard-count=%s' % options.shard_count]
if options.shard_run != 1:
- result += ['--shard_run=%s' % options.shard_run]
+ result += ['--shard-run=%s' % options.shard_run]
if options.noprof:
result += ['--noprof']
return result
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index 951afcc85d..0aacd993f3 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -472,7 +472,7 @@ def RunProcess(context, timeout, args, **rest):
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
- popen_args = '"' + subprocess.list2cmdline(args) + '"'
+ popen_args = subprocess.list2cmdline(args)
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
@@ -686,8 +686,8 @@ SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
- 'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
- 'release' : []}
+ 'debug' : ['--nobreak-on-abort', '--enable-slow-asserts', '--debug-code', '--verify-heap'],
+ 'release' : ['--nobreak-on-abort']}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }