author     Ryan Dahl <ry@tinyclouds.org>  2011-02-09 10:24:26 -0800
committer  Ryan Dahl <ry@tinyclouds.org>  2011-02-09 10:24:26 -0800
commit     a0702b54d1db35a6006644882c0b5420d8670958 (patch)
tree       e246bb342237e8caabd45450301cb7cc5e4ccf24 /deps/v8
parent     a48a0755358d322f4a593d09c331432c10a500bc (diff)
download   node-new-a0702b54d1db35a6006644882c0b5420d8670958.tar.gz
Upgrade V8 to 3.1.2
Diffstat (limited to 'deps/v8')
-rw-r--r--  deps/v8/.gitignore | 1
-rw-r--r--  deps/v8/AUTHORS | 3
-rw-r--r--  deps/v8/ChangeLog | 12
-rw-r--r--  deps/v8/LICENSE.strongtalk (renamed from deps/v8/src/third_party/strongtalk/LICENSE) | 10
-rw-r--r--  deps/v8/LICENSE.v8 | 26
-rw-r--r--  deps/v8/LICENSE.valgrind | 45
-rw-r--r--  deps/v8/SConstruct | 2
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 16
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 18
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 446
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 39
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 1
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 24
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 10
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 25
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 117
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 243
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 196
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 5
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 158
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 36
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 213
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 8
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 230
-rw-r--r--  deps/v8/src/array.js | 8
-rw-r--r--  deps/v8/src/assembler.cc | 12
-rw-r--r--  deps/v8/src/assembler.h | 35
-rw-r--r--  deps/v8/src/code-stubs.h | 3
-rw-r--r--  deps/v8/src/codegen-inl.h | 4
-rw-r--r--  deps/v8/src/compilation-cache.cc | 21
-rw-r--r--  deps/v8/src/compilation-cache.h | 3
-rwxr-xr-x  deps/v8/src/compiler.cc | 14
-rw-r--r--  deps/v8/src/compiler.h | 13
-rw-r--r--  deps/v8/src/conversions.cc | 78
-rw-r--r--  deps/v8/src/deoptimizer.cc | 4
-rw-r--r--  deps/v8/src/disassembler.cc | 4
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 6
-rw-r--r--  deps/v8/src/full-codegen.cc | 2
-rw-r--r--  deps/v8/src/full-codegen.h | 3
-rw-r--r--  deps/v8/src/handles.cc | 2
-rw-r--r--  deps/v8/src/heap-profiler.cc | 3
-rw-r--r--  deps/v8/src/heap.cc | 11
-rw-r--r--  deps/v8/src/heap.h | 11
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 10
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 114
-rw-r--r--  deps/v8/src/hydrogen.cc | 92
-rw-r--r--  deps/v8/src/hydrogen.h | 2
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 48
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 19
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 10
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 1
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 115
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 153
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 22
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 45
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 98
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 238
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 20
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 11
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 31
-rw-r--r--  deps/v8/src/ic.cc | 48
-rw-r--r--  deps/v8/src/lithium-allocator-inl.h | 140
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 171
-rw-r--r--  deps/v8/src/lithium-allocator.h | 118
-rw-r--r--  deps/v8/src/lithium.h | 76
-rw-r--r--  deps/v8/src/messages.js | 9
-rw-r--r--  deps/v8/src/objects-inl.h | 28
-rw-r--r--  deps/v8/src/objects.cc | 55
-rw-r--r--  deps/v8/src/objects.h | 23
-rw-r--r--  deps/v8/src/parser.cc | 126
-rw-r--r--  deps/v8/src/parser.h | 8
-rw-r--r--  deps/v8/src/preparser.cc | 31
-rw-r--r--  deps/v8/src/preparser.h | 2
-rw-r--r--  deps/v8/src/prettyprinter.cc | 25
-rw-r--r--  deps/v8/src/runtime.cc | 192
-rw-r--r--  deps/v8/src/runtime.h | 4
-rw-r--r--  deps/v8/src/safepoint-table.cc | 22
-rw-r--r--  deps/v8/src/safepoint-table.h | 11
-rw-r--r--  deps/v8/src/scanner-base.cc | 61
-rw-r--r--  deps/v8/src/scanner-base.h | 9
-rwxr-xr-x  deps/v8/src/scanner.cc | 19
-rw-r--r--  deps/v8/src/scanner.h | 9
-rw-r--r--  deps/v8/src/scopes.cc | 3
-rw-r--r--  deps/v8/src/stub-cache.cc | 33
-rw-r--r--  deps/v8/src/stub-cache.h | 4
-rw-r--r--  deps/v8/src/third_party/strongtalk/README.chromium | 18
-rw-r--r--  deps/v8/src/token.h | 37
-rw-r--r--  deps/v8/src/top.h | 2
-rw-r--r--  deps/v8/src/type-info.h | 6
-rw-r--r--  deps/v8/src/uri.js | 3
-rw-r--r--  deps/v8/src/v8globals.h | 6
-rw-r--r--  deps/v8/src/v8natives.js | 146
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 27
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 32
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 497
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 24
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 10
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 1
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 147
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 5
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 41
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 26
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 409
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 237
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 65
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 29
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 29
-rw-r--r--  deps/v8/test/cctest/cctest.status | 2
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 329
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 216
-rw-r--r--  deps/v8/test/cctest/test-bignum-dtoa.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-dtoa.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-fast-dtoa.cc | 2
-rw-r--r--  deps/v8/test/es5conform/es5conform.status | 220
-rw-r--r--  deps/v8/test/mjsunit/get-own-property-descriptor.js | 16
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 4
-rw-r--r--  deps/v8/test/mjsunit/object-define-property.js | 137
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1083.js | 38
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1092.js | 35
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1099.js | 46
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-900966.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-992.js | 43
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-deopt-gc.js | 49
-rw-r--r--  deps/v8/test/mjsunit/strict-mode-eval.js | 82
-rw-r--r--  deps/v8/test/mjsunit/strict-mode.js | 65
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 15
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 1
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 20
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 4
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj | 116
135 files changed, 5878 insertions(+), 1766 deletions(-)
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index d85ef64d41..c68dadbe98 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -20,6 +20,7 @@ d8_g
shell
shell_g
/obj/
+/test/sputnik/sputniktests/
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/visual_studio/Debug
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index da864885ae..1b756caf27 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -26,6 +26,7 @@ Kun Zhang <zhangk@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Michael Smith <mike@w3.org>
+Mike Gilbert <floppymaster@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Rafal Krypa <rafal@krypa.net>
@@ -35,4 +36,4 @@ Ryan Dahl <coldredlemur@gmail.com>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Vlad Burlik <vladbph@gmail.com>
-Mike Gilbert <floppymaster@gmail.com>
+Zaheer Ahmad <zahmad@codeaurora.org>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index f143a40cd2..d48ded840c 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,15 @@
+2011-02-07: Version 3.1.2
+
+ Added better security checks when accessing properties via
+ Object.getOwnPropertyDescriptor.
+
+ Fixed bug in Object.defineProperty and related access bugs (issues
+ 992, 1083 and 1092).
+
+ Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease
+ copyright notice generation for embedders.
+
+
2011-02-02: Version 3.1.1
Perform security checks before fetching the value in
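For context on the 3.1.2 entry above, here is a minimal mjsunit-style sketch of the ES5 semantics that Object.defineProperty and Object.getOwnPropertyDescriptor must enforce. This is illustrative only; the actual regression tests for issues 992, 1083 and 1092 live in the mjsunit/regress files listed in the diffstat, and assertEquals/assertTrue are assumed to be the usual mjsunit helpers.

// A non-writable, non-configurable property must resist both plain
// assignment and redefinition, and its descriptor must report the
// original attributes.
var o = {};
Object.defineProperty(o, 'x', {
  value: 1,
  writable: false,
  configurable: false
});

o.x = 2;                 // Silently ignored outside strict mode.
assertEquals(1, o.x);

var threw = false;
try {
  // Changing the value of a non-writable, non-configurable property
  // must throw a TypeError.
  Object.defineProperty(o, 'x', { value: 3 });
} catch (e) {
  threw = e instanceof TypeError;
}
assertTrue(threw);

var desc = Object.getOwnPropertyDescriptor(o, 'x');
assertEquals(1, desc.value);
assertEquals(false, desc.writable);
assertEquals(false, desc.configurable);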
diff --git a/deps/v8/src/third_party/strongtalk/LICENSE b/deps/v8/LICENSE.strongtalk
index 7473a7b2b5..9bd62e4f23 100644
--- a/deps/v8/src/third_party/strongtalk/LICENSE
+++ b/deps/v8/LICENSE.strongtalk
@@ -6,15 +6,15 @@ modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
+this list of conditions and the following disclaimer.
- Redistribution in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
- Neither the name of Sun Microsystems or the names of contributors may
- be used to endorse or promote products derived from this software without
- specific prior written permission.
+be used to endorse or promote products derived from this software without
+specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
diff --git a/deps/v8/LICENSE.v8 b/deps/v8/LICENSE.v8
new file mode 100644
index 0000000000..933718a9ef
--- /dev/null
+++ b/deps/v8/LICENSE.v8
@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/LICENSE.valgrind b/deps/v8/LICENSE.valgrind
new file mode 100644
index 0000000000..fd8ebaf509
--- /dev/null
+++ b/deps/v8/LICENSE.valgrind
@@ -0,0 +1,45 @@
+----------------------------------------------------------------
+
+Notice that the following BSD-style license applies to this one
+file (valgrind.h) only. The rest of Valgrind is licensed under the
+terms of the GNU General Public License, version 2, unless
+otherwise indicated. See the COPYING file in the source
+distribution for details.
+
+----------------------------------------------------------------
+
+This file is part of Valgrind, a dynamic binary instrumentation
+framework.
+
+Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index c9993991d5..bae1cd5e1d 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -136,7 +136,7 @@ LIBRARY_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
+ 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index ebf040d904..243ba4978a 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -2124,7 +2124,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
const int dst_code,
const VFPType src_type,
const int src_code,
- Assembler::ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(src_type != dst_type);
int D, Vd, M, Vm;
@@ -2167,7 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
@@ -2176,7 +2176,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
@@ -2185,7 +2185,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
@@ -2194,7 +2194,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
@@ -2203,7 +2203,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
@@ -2212,7 +2212,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
@@ -2221,7 +2221,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index a6edf66ec8..fc826c727e 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -942,37 +942,33 @@ class Assembler : public Malloced {
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
- enum ConversionMode {
- FPSCRRounding = 0,
- RoundToZero = 1
- };
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vabs(const DwVfpRegister dst,
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 590d8ce15e..437dfd2733 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -396,6 +396,19 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Label* not_number);
+
+ // Loads the number from object into dst as a 32-bit integer if possible. If
+ // the object is not a 32-bit integer, control continues at the label
+ // not_int32. If VFP is supported, double_scratch is used but not scratch2.
+ static void LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
@@ -461,15 +474,21 @@ void FloatingPointHelper::LoadOperands(
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
+ Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+
Label is_smi, done;
__ JumpIfSmi(object, &is_smi);
@@ -514,6 +533,34 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ DwVfpRegister double_scratch,
+ Label* not_int32) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ Label is_smi, done;
+ __ JumpIfSmi(object, &is_smi);
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+ __ cmp(scratch1, heap_number_map);
+ __ b(ne, not_int32);
+ __ ConvertToInt32(
+ object, dst, scratch1, scratch2, double_scratch, not_int32);
+ __ jmp(&done);
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
+ __ bind(&done);
+}
+
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1676,7 +1723,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
- __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+ __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
__ jmp(&done_checking_lhs);
__ bind(&lhs_is_smi);
__ mov(r3, Operand(lhs, ASR, 1));
@@ -1687,7 +1734,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
- __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+ __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
__ jmp(&done_checking_rhs);
__ bind(&rhs_is_smi);
__ mov(r2, Operand(rhs, ASR, 1));
@@ -2529,6 +2576,18 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
__ and_(right, left, Operand(scratch1));
__ Ret();
break;
+ case Token::BIT_OR:
+ __ orr(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ eor(right, left, Operand(right));
+ __ Ret();
+ break;
default:
UNREACHABLE();
}
@@ -2545,90 +2604,179 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
Register scratch1 = r7;
Register scratch2 = r9;
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
- // on whether VFP3 is available.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands && FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Allocate new heap number for result.
- Register result = r5;
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+ // depending on whether VFP3 is available or not.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ // Allocate new heap number for result.
+ Register result = r5;
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+
+ // Load the operands.
+ if (smi_operands) {
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ } else {
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
+ }
- // Load the operands.
- if (smi_operands) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
- }
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Using VFP registers:
+ // d6: Left value
+ // d7: Right value
+ CpuFeatures::Scope scope(VFP3);
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d6, d7);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d6, d7);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d6, d7);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d6, d7);
+ break;
+ default:
+ UNREACHABLE();
+ }
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP3);
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
+ __ sub(r0, result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is callee
+ // save.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from r5.
- __ sub(scratch1, result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+ // need to subtract the tag from r5.
+ __ sub(scratch1, result, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
#else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
#endif
- __ mov(r0, Operand(result));
- // And we are done.
- __ pop(pc);
+ // Place the result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(result));
+ __ pop(pc);
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ if (smi_operands) {
+ __ SmiUntag(r3, left);
+ __ SmiUntag(r2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in r2 and left in r3.
+ FloatingPointHelper::LoadNumberAsInteger(masm,
+ left,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ d0,
+ not_numbers);
+ FloatingPointHelper::LoadNumberAsInteger(masm,
+ right,
+ r2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ d0,
+ not_numbers);
+ }
+ switch (op_) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Label result_not_a_smi;
+ // Check that the *signed* result fits in a smi.
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ __ SmiTag(r0, r2);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ __ AllocateHeapNumber(
+ r5, scratch1, scratch2, heap_number_map, gc_required);
+
+ // r2: Answer as signed int32.
+ // r5: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(r5));
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
}
}
@@ -2646,7 +2794,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
Register left = r1;
Register right = r0;
@@ -2678,7 +2829,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
@@ -2714,7 +2868,10 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@@ -2727,7 +2884,10 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
Label not_numbers, call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@@ -2747,7 +2907,10 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
Label call_runtime;
@@ -2812,6 +2975,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_JS);
break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
default:
UNREACHABLE();
}
@@ -3037,7 +3209,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, &slow);
+ __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
// Do the bitwise operation (move negated) and check if the result
// fits in a smi.
@@ -3329,9 +3501,17 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// this by performing a garbage collection and retrying the
// builtin once.
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r6, r6, Operand(kPointerSize));
+
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(save_doubles_);
+ // Set up argc and the builtin function in callee-saved registers.
+ __ mov(r4, Operand(r0));
+ __ mov(r5, Operand(r1));
+
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
// r6: pointer to first argument (C callee-saved)
@@ -5734,6 +5914,90 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ __ ldr(pc, MemOperand(sp, 0));
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ApiFunction *function) {
+ __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET));
+ // Push return address (accessible to GC through exit frame pc).
+ __ mov(r2,
+ Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ __ str(pc, MemOperand(sp, 0));
+ __ Jump(r2); // Call the api function.
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - set to be the receiver's elements on exit.
+ //
+ // elements_map - set to be the map of the receiver's elements
+ // on exit.
+ //
+ // result - holds the result of the pixel array load on exit,
+ // tagged as a smi if successful.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used as a scratch register in the map check; if the map
+ // check is successful, contains the length of the
+ // pixel array, the pointer to external elements and
+ // the untagged result.
+ //
+ // scratch2 - holds the untagged key.
+
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, an explicit check
+ // that the key is a smi is generated.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiUntag(scratch2, key);
+
+ // Verify that the receiver has pixel array elements.
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
+ not_pixel_array, true);
+
+ // Key must be in range of the pixel array.
+ __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
+ __ cmp(scratch2, scratch1);
+ __ b(hs, out_of_range); // unsigned check handles negative keys.
+
+ // Perform the indexed load and tag the result as a smi.
+ __ ldr(scratch1,
+ FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+ __ ldrb(scratch1, MemOperand(scratch1, scratch2));
+ __ SmiTag(r0, scratch1);
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
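A note on the smi check introduced in the bitwise path above: the sequence add(r3, r2, Operand(0x40000000), SetCC) followed by b(mi, ...) tests whether the 32-bit result fits the 31-bit smi range, because adding 2^30 yields a negative 32-bit value exactly when the input lies outside [-2^30, 2^30). A sketch of the same predicate in JavaScript (illustrative only, not part of the patch):

// True iff the 32-bit integer x fits V8's 31-bit smi range [-2^30, 2^30).
function fitsSmi(x) {
  // x + 0x40000000 overflows into the sign bit exactly when x is out of
  // range; | 0 reproduces the 32-bit wraparound of the ARM add.
  return ((x + 0x40000000) | 0) >= 0;
}

fitsSmi(0x3fffffff);   // true  (2^30 - 1, the largest smi)
fitsSmi(0x40000000);   // false (2^30, one past the range)
fitsSmi(-0x40000000);  // true  (-2^30, the smallest smi)
fitsSmi(-0x40000001);  // false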
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 9e9047e579..bf7d635487 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -571,6 +571,45 @@ class RegExpCEntryStub: public CodeStub {
};
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects), we need to
+// keep the code that called into native code pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "DirectCEntryStub"; }
+};
+
+
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee this
+// precondition. If the receiver does not have elements that are pixel arrays,
+// the generated code jumps to not_pixel_array. If key is not a smi, the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key, so
+// that no smi check is generated. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 9a60183979..12842230bf 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1110,7 +1110,7 @@ void DeferredInlineSmiOperation::GenerateNonSmiInput() {
Register int32 = r2;
// Not a 32-bit signed int; fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+ __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
// tos_register_ (r0 or r1): Original heap number.
// int32: signed 32bits int.
@@ -4177,7 +4177,10 @@ void CodeGenerator::VisitCall(Call* node) {
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump();
slow.Bind();
@@ -4197,8 +4200,11 @@ void CodeGenerator::VisitCall(Call* node) {
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
// Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
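The new strict-mode-flag argument to kResolvePossiblyDirectEval matters because a direct eval in strict code must get its own declaration scope instead of writing into the caller's. A minimal sketch of the difference (illustrative; the new mjsunit/strict-mode-eval.js test in the diffstat covers this area):

function sloppyEval() {
  eval('var leaked = 1;');
  return typeof leaked;      // 'number': the var binds in the caller's scope.
}

function strictEval() {
  'use strict';
  eval('var contained = 1;');
  return typeof contained;   // 'undefined': strict eval gets its own scope.
}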
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 589e704b51..8f46256b8a 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -287,6 +287,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 7502ef0d65..5671feecba 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -380,10 +380,13 @@ enum VFPRegPrecision {
// VFP FPSCR constants.
+enum VFPConversionMode {
+ kFPSCRRounding = 0,
+ kDefaultRoundToZero = 1
+};
+
static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
@@ -393,13 +396,20 @@ static const uint32_t kVFPVConditionFlagBit = 1 << 28;
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum FPSCRRoundingModes {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
+enum VFPRoundingMode {
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+ kRoundToZero = RZ
};
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
// -----------------------------------------------------------------------------
// Hints.
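With this change each VFPRoundingMode enumerator carries its bit pattern for the FPSCR RMode field (bits 23:22) directly, so installing a mode becomes a plain mask-and-or on the status register word. A sketch of the bit arithmetic (illustrative only, in JavaScript for brevity):

var kVFPRoundingModeMask = 3 << 22;   // RMode occupies FPSCR bits 23:22.
var RN = 0 << 22, RP = 1 << 22, RM = 2 << 22, RZ = 3 << 22;

// Install a rounding mode without disturbing the other FPSCR bits.
function setRoundingMode(fpscr, mode) {
  return (fpscr & ~kVFPRoundingModeMask) | mode;
}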
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 75399c69db..e05001f3c3 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -97,7 +97,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
- (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+ (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 23e5f69ebe..ff446c5e4b 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -1554,7 +1554,10 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
op == Token::SUB ||
op == Token::MUL ||
op == Token::DIV ||
- op == Token::MOD) {
+ op == Token::MOD ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_AND ||
+ op == Token::BIT_XOR) {
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {
@@ -1923,7 +1926,10 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ ldr(r1,
MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
__ push(r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index d74468c945..1aa031d39b 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1189,19 +1189,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// r0: key
// r1: receiver
__ bind(&check_pixel_array);
- __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &check_number_dictionary);
- __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
- __ cmp(r2, ip);
- __ b(hs, &slow);
- __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
- __ ldrb(r2, MemOperand(ip, r2));
- __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi.
- __ Ret();
+
+ GenerateFastPixelArrayLoad(masm,
+ r1,
+ r0,
+ r3,
+ r4,
+ r2,
+ r5,
+ r0,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
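Pixel arrays are the byte-per-element external arrays an embedder can attach to an object (in a browser they back canvas ImageData.data), and the refactored fast path above is the bounds-checked byte load that smi-tags the result. A sketch of the observable behavior (illustrative; pixel arrays cannot be constructed from pure JavaScript):

// 'pixels' is assumed to be an embedder-provided pixel array.
function load(pixels, i) {
  return pixels[i];   // Fast path: unsigned byte load, result in 0..255.
}
// An out-of-range or non-smi key falls through to the slower generic
// paths (&check_number_dictionary / &slow in the stub above).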
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index b7c1b78d1b..f672d4908e 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
@@ -56,6 +57,31 @@ void LOsrEntry::MarkSpilledRegister(int allocation_index,
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs can use either fixed register or have a short lifetime (be
+ // used at start of the instruction).
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ LUnallocated::cast(operand)->IsUsedAtStart() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
LOperand* spill_operand) {
ASSERT(spill_operand->IsDoubleStackSlot());
@@ -66,9 +92,8 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -158,6 +183,9 @@ const char* LArithmeticT::Mnemonic() const {
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
default:
UNREACHABLE();
return NULL;
@@ -258,7 +286,15 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
}
@@ -390,7 +426,7 @@ void LChunk::MarkEmptyBlocks() {
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -406,7 +442,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -672,7 +707,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -697,7 +735,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -742,13 +780,23 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
}
@@ -887,7 +935,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -910,11 +957,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -1105,13 +1148,26 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1514,7 +1570,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new LDoubleToI(value);
+ LDoubleToI* res = new LDoubleToI(value, TempRegister());
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1621,7 +1677,20 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- return DefineAsRegister(new LLoadContextSlot);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context = UseTempRegister(instr->context());
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ value = UseTempRegister(instr->value());
+ } else {
+ value = UseRegister(instr->value());
+ }
+ return new LStoreContextSlot(context, value);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 7f89ee2922..a076c80c75 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -39,118 +39,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LStringCharCodeAt
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LStringLength
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
@@ -187,6 +75,8 @@ class LCodeGen;
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
@@ -197,6 +87,7 @@ class LCodeGen;
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
@@ -207,6 +98,10 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
@@ -218,22 +113,16 @@ class LCodeGen;
V(IsSmi) \
V(IsSmiAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -241,6 +130,7 @@ class LCodeGen;
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -249,14 +139,15 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
- V(SubI) \
V(StringCharCodeAt) \
V(StringLength) \
+ V(SubI) \
V(TaggedToI) \
V(Throw) \
V(Typeof) \
@@ -290,7 +181,10 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -307,16 +201,14 @@ class LInstruction: public ZoneObject {
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -330,11 +222,35 @@ class LInstruction: public ZoneObject {
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
@@ -361,6 +277,11 @@ class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
@@ -1266,18 +1187,41 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@@ -1288,15 +1232,45 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
};
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
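
Taken together, the new instructions make each step of the context walk an explicit pointer dereference instead of hiding the whole chain inside one instruction. A rough C++ sketch of what the chain computes, with illustrative field names standing in for the real context slots (CLOSURE_INDEX, FCONTEXT_INDEX, GLOBAL_INDEX) and offsets:

    // Sketch only: simplified structs, field names are stand-ins.
    struct Context;
    struct JSFunction { Context* context; };         // JSFunction::kContextOffset
    struct GlobalObject { void* global_receiver; };  // kGlobalReceiverOffset
    struct Context {
      JSFunction*   closure;   // CLOSURE_INDEX slot
      Context*      fcontext;  // FCONTEXT_INDEX slot
      GlobalObject* global;    // GLOBAL_INDEX slot
    };

    Context* OuterContext(Context* c) { return c->closure->context; }     // LOuterContext
    GlobalObject* Global(Context* c) { return c->global; }                // LGlobalObject
    void* GlobalReceiver(GlobalObject* g) { return g->global_receiver; }  // LGlobalReceiver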
@@ -1431,10 +1405,11 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LDoubleToI(LOperand* value) {
+ explicit LDoubleToI(LOperand* value, LOperand* temp1) {
inputs_[0] = value;
+ temps_[0] = temp1;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1789,7 +1764,7 @@ class LChunk: public ZoneObject {
public:
explicit LChunk(HGraph* graph);
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index a1adae389d..855ed461b5 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -223,7 +223,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -1174,7 +1174,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// scratch:left = left * right.
- __ smull(scratch, left, left, right);
+ __ smull(left, scratch, left, right);
__ mov(ip, Operand(left, ASR, 31));
__ cmp(ip, Operand(scratch));
DeoptimizeIf(ne, instr->environment());
@@ -1398,7 +1398,18 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vdiv(left, left, right);
break;
case Token::MOD: {
- Abort("DoArithmeticD unimplemented for MOD.");
+ // Save r0-r3 on the stack.
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
+
+ __ PrepareCallCFunction(4, scratch0());
+ __ vmov(r0, r1, left);
+ __ vmov(r2, r3, right);
+ __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+ // Move the result into the double result register.
+ __ vmov(ToDoubleRegister(instr->result()), r0, r1);
+
+ // Restore r0-r3.
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
break;
}
default:
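
For the MOD case above, the two doubles travel to the C function in core registers: under the ARM soft-float calling convention the first double occupies r0:r1 and the second r2:r3, and the result comes back in r0:r1, which is why the code brackets the call with vmov instructions. A plausible C++ shape for the redirected target (the real one is resolved through ExternalReference::double_fp_operation):

    #include <cmath>

    // Sketch of the callee: arguments arrive in r0:r1 and r2:r3, the
    // result returns in r0:r1 and is moved back to a VFP register.
    extern "C" double fp_mod(double dividend, double divisor) {
      return std::fmod(dividend, divisor);
    }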
@@ -1595,17 +1606,58 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToOperand(right));
- Abort("EmitCmpI untested.");
}
void LCodeGen::DoCmpID(LCmpID* instr) {
- Abort("DoCmpID unimplemented.");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ Register scratch = scratch0();
+
+ Label unordered, done;
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ vmrs(pc);
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to unordered to return false.
+ __ b(vs, &unordered);
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+ __ b(cc, &done);
+
+ __ bind(&unordered);
+ __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- Abort("DoCmpIDAndBranch unimplemented.");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ vmrs(pc);
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to false block label.
+ __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ EmitBranch(true_block, false_block, cc);
}
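
Both compare paths above special-case NaN: a VFP compare involving NaN sets the V (unordered) flag, and the code branches to the false outcome, matching IEEE-754. A quick standalone check of the semantics being mirrored:

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      // Every ordered comparison involving NaN is false, including ==.
      assert(!(nan < 1.0) && !(nan > 1.0) && !(nan == nan));
      return 0;
    }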
@@ -2201,13 +2253,27 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- // TODO(antonm): load a context with a separate instruction.
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadContext(result, instr->context_chain_length());
+ __ ldr(result,
+ MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ ldr(result, ContextOperand(result, instr->slot_index()));
}
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ ldr(context,
+ MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(value, ContextOperand(context, instr->slot_index()));
+ if (instr->needs_write_barrier()) {
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, Operand(offset), value, scratch0());
+ }
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -2458,16 +2524,32 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ ldr(result,
+ MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(result, ContextOperand(context, Context::GLOBAL_INDEX));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -2625,34 +2707,53 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register prev_fpscr = ToRegister(instr->TempAt(0));
- SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch = scratch0();
+// Truncates a double using a specific rounding mode.
+// Clears the z flag (ne condition) if a VFP exception occurred.
+void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2) {
+ Register prev_fpscr = scratch1;
+ Register scratch = scratch2;
// Set custom FPCSR:
- // - Set rounding mode to "Round towards Minus Infinity".
+ // - Set rounding mode.
// - Clear vfp cumulative exception flags.
// - Make sure Flush-to-zero mode control bit is unset.
__ vmrs(prev_fpscr);
- __ bic(scratch, prev_fpscr,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
+ __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
+ kVFPRoundingModeMask |
+ kVFPFlushToZeroMask));
+ __ orr(scratch, scratch, Operand(rounding_mode));
__ vmsr(scratch);
// Convert the argument to an integer.
- __ vcvt_s32_f64(single_scratch,
- input,
- Assembler::FPSCRRounding,
- al);
+ __ vcvt_s32_f64(result,
+ double_input,
+ kFPSCRRounding);
- // Retrieve FPSCR and check for vfp exceptions.
+ // Retrieve FPSCR.
__ vmrs(scratch);
- // Restore FPSCR
+ // Restore FPSCR.
__ vmsr(prev_fpscr);
+ // Check for vfp exceptions.
__ tst(scratch, Operand(kVFPExceptionMask));
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ EmitVFPTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to a general-purpose register.
@@ -2662,8 +2763,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
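
EmitVFPTruncate's save/modify/restore dance around the FPSCR has a portable analog in <cfenv>: stash the rounding mode, clear the sticky exception flags, convert, test the flags, restore. A rough sketch under that analogy, not the V8 code path itself:

    #include <cfenv>
    #include <cmath>

    int FloorToInt(double value, bool* invalid) {
      int saved_mode = std::fegetround();
      std::fesetround(FE_DOWNWARD);        // like kRoundToMinusInf
      std::feclearexcept(FE_ALL_EXCEPT);   // clear cumulative flags
      double rounded = std::nearbyint(value);
      int result = static_cast<int>(rounded);
      // On typical hardware the conversion of NaN or an out-of-range
      // value raises FE_INVALID, matching the tst against the VFP
      // exception mask above.
      *invalid = std::fetestexcept(FE_INVALID) != 0;
      std::fesetround(saved_mode);         // restore the previous mode
      return result;
    }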
@@ -3297,7 +3398,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("DoDoubleToI unimplemented.");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ DoubleRegister double_input = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
+ : kRoundToNearest;
+
+ EmitVFPTruncate(rounding_mode,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2);
+ // Deoptimize if we had a vfp invalid exception.
+ DeoptimizeIf(ne, instr->environment());
+ // Retrieve the result.
+ __ vmov(result_reg, single_scratch);
+
+ if (instr->truncating() &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand(0));
+ __ b(ne, &done);
+ // Check for -0.
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+
+ __ bind(&done);
+ }
}
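
The kBailoutOnMinusZero path works because -0.0 converts to the integer 0 yet keeps its sign bit in the high word of the double, which is exactly what the HeapNumber::kSignMask test reads. The same check in portable C++:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // inspect the raw encoding
      return bits == (1ULL << 63);               // only the sign bit is set
    }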
@@ -3497,7 +3633,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = !instr->hydrogen()->pretenure();
+ bool pretenure = instr->hydrogen()->pretenure();
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ mov(r1, Operand(shared_info));
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 27a72f29a0..3f7fe4519b 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -219,6 +219,11 @@ class LCodeGen BASE_EMBEDDED {
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
+ void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index ef351022a8..c11d664f07 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -632,11 +632,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
- // Compute the argv pointer in a callee-saved register.
- add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- sub(r6, r6, Operand(kPointerSize));
-
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Setup the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -658,10 +654,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
mov(ip, Operand(ExternalReference(Top::k_context_address)));
str(cp, MemOperand(ip));
- // Setup argc and the builtin function in callee-saved registers.
- mov(r4, Operand(r0));
- mov(r5, Operand(r1));
-
// Optionally save all double registers.
if (save_doubles) {
sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
@@ -675,10 +667,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
// since the sp slot and code slot were pushed after the fp.
}
- // Reserve place for the return address and align the frame preparing for
- // calling the runtime function.
+ // Reserve space for the return address and the extra stack space, and
+ // align the frame in preparation for calling the runtime function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- sub(sp, sp, Operand(kPointerSize));
+ sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
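
The frame math above reserves stack_space + 1 words (the extra word holds the return address) and then rounds sp down to the activation frame alignment, which must be a power of two. The same arithmetic in plain C++:

    #include <cstdint>

    uintptr_t AlignedExitFrameSp(uintptr_t sp,
                                 int stack_space,
                                 uintptr_t frame_alignment) {
      sp -= (stack_space + 1) * sizeof(void*);  // slots plus return address
      // and_(sp, sp, -frame_alignment): round down to a power of two.
      return sp & ~(frame_alignment - 1);
    }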
@@ -1475,14 +1467,112 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+ return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+ ApiFunction* function, int stack_space) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(),
+ next_address);
+
+ // Allocate HandleScope in callee-save registers.
+ mov(r7, Operand(next_address));
+ ldr(r4, MemOperand(r7, kNextOffset));
+ ldr(r5, MemOperand(r7, kLimitOffset));
+ ldr(r6, MemOperand(r7, kLevelOffset));
+ add(r6, r6, Operand(1));
+ str(r6, MemOperand(r7, kLevelOffset));
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, function);
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ // If the result is non-zero, dereference it to get the result value;
+ // otherwise set it to undefined.
+ cmp(r0, Operand(0));
+ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ ldr(r0, MemOperand(r0), ne);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ str(r4, MemOperand(r7, kNextOffset));
+ if (FLAG_debug_code) {
+ ldr(r1, MemOperand(r7, kLevelOffset));
+ cmp(r1, r6);
+ Check(eq, "Unexpected level after return from api call");
+ }
+ sub(r6, r6, Operand(1));
+ str(r6, MemOperand(r7, kLevelOffset));
+ ldr(ip, MemOperand(r7, kLimitOffset));
+ cmp(r5, ip);
+ b(ne, &delete_allocated_handles);
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+ ldr(r5, MemOperand(ip));
+ cmp(r4, r5);
+ b(ne, &promote_scheduled_exception);
+
+ // LeaveExitFrame expects unwind space to be in r4.
+ mov(r4, Operand(stack_space));
+ LeaveExitFrame(false);
+
+ bind(&promote_scheduled_exception);
+ MaybeObject* result = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ str(r5, MemOperand(r7, kLimitOffset));
+ mov(r4, r0);
+ PrepareCallCFunction(0, r5);
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+ mov(r0, r4);
+ jmp(&leave_exit_frame);
+
+ return result;
}
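
The bookkeeping in TryCallApiFunctionAndReturn follows a fixed shape: bump the scope level, make the call, pop handles by restoring next, and take the slow path only if the limit moved (extensions were allocated during the call). A schematic C++ rendering, with illustrative names rather than the real HandleScope internals:

    struct HandleScopeData {
      void** next;   // next free handle slot
      void** limit;  // end of the current handle block
      int level;     // nesting depth
    };

    template <typename ApiCall>
    void* CallAndRestoreScope(HandleScopeData* scope, ApiCall call) {
      void** saved_next = scope->next;
      void** saved_limit = scope->limit;
      scope->level++;                  // enter the scope
      void* result = call();
      scope->next = saved_next;        // drop handles created by the call
      scope->level--;
      if (scope->limit != saved_limit) {
        // Extensions were allocated; delete them (the slow path above).
      }
      return result;
    }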
@@ -1577,13 +1667,14 @@ void MacroAssembler::ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
+ DwVfpRegister double_scratch,
Label *not_int32) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
- vldr(d0, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(s0, d0);
- vmov(dest, s0);
+ vldr(double_scratch, scratch, HeapNumber::kValueOffset);
+ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ vmov(dest, double_scratch.low());
// Signed vcvt instruction will saturate to the minimum (0x80000000) or
// maximum (0x7fffffff) signed 32-bit integer when the double is out of
// range. When subtracting one, the minimum signed integer becomes the
@@ -1739,6 +1830,17 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ return TryJumpToExternalReference(ext);
+}
+
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -1757,6 +1859,18 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+ const ExternalReference& builtin) {
+#if defined(__thumb__)
+ // Thumb mode builtin.
+ ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
+ mov(r1, Operand(builtin));
+ CEntryStub stub(1);
+ return TryTailCallStub(&stub);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator) {
@@ -1999,6 +2113,16 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(ip));
+ LoadRoot(ip, root_value_index);
+ cmp(src, ip);
+ Assert(eq, message);
+}
+
+
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index a1a13e32ef..c9ffde8981 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -287,10 +287,8 @@ class MacroAssembler: public Assembler {
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame.
- // Expects the number of arguments in register r0 and
- // the builtin function to call in register r1. Exits with argc in
- // r4, argv in r6, and and the builtin function to call in r5.
- void EnterExitFrame(bool save_doubles);
+ // stack_space - extra stack space, used for alignment before call to C.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(bool save_doubles);
@@ -589,11 +587,13 @@ class MacroAssembler: public Assembler {
// Convert the HeapNumber pointed to by source to a 32-bit signed integer
// dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
- // to not_int32 label.
+ // to not_int32 label. If VFP3 is available double_scratch is used but not
+ // scratch2.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
+ DwVfpRegister double_scratch,
Label *not_int32);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
@@ -614,6 +614,12 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
+ // Tail call a code stub (jump) and return the code object called. Try to
+ // generate the code if necessary. Do not perform a GC but instead return
+ // a retry after GC failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+ Condition cond = al);
+
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -632,6 +638,12 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ // Tail call of a runtime routine (jump). Try to generate the code if
+ // necessary. Do not perform a GC but instead return a retry after GC
+ // failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size);
+
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -655,9 +667,18 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context.
+ // stack_space - space to be unwound on exit (includes the JS call
+ // arguments space and the additional space allocated for the fast call).
+ MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+ int stack_space);
+
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
+ MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -763,6 +784,11 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
// ---------------------------------------------------------------------------
// HeapNumber utilities
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index de440306c9..8104747f14 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -744,10 +744,10 @@ Simulator::Simulator() {
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, bool fp_return)
+ Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
- fp_return_(fp_return),
+ type_(type),
next_(list_) {
Simulator::current()->
FlushICache(reinterpret_cast<void*>(&swi_instruction_),
@@ -760,14 +760,15 @@ class Redirection {
}
void* external_function() { return external_function_; }
- bool fp_return() { return fp_return_; }
+ ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function, bool fp_return) {
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
Redirection* current;
for (current = list_; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, fp_return);
+ return new Redirection(external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -780,7 +781,7 @@ class Redirection {
private:
void* external_function_;
uint32_t swi_instruction_;
- bool fp_return_;
+ ExternalReference::Type type_;
Redirection* next_;
static Redirection* list_;
};
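
Redirection::Get is a small intern table over a singly linked list: reuse the node for an already-redirected function, otherwise create a fresh one that links itself at the head (note next_(list_) in the constructor). Stripped to its essentials:

    // Sketch of the lookup-or-create pattern used by Redirection::Get.
    struct Node {
      void* key;
      Node* next;
      Node(void* k, Node* n) : key(k), next(n) {}
    };
    static Node* list_ = 0;

    Node* GetOrCreate(void* key) {
      for (Node* n = list_; n != 0; n = n->next) {
        if (n->key == key) return n;  // already redirected
      }
      list_ = new Node(key, list_);   // prepend, as Redirection's ctor does
      return list_;
    }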
@@ -790,8 +791,8 @@ Redirection* Redirection::list_ = NULL;
void* Simulator::RedirectExternalReference(void* external_function,
- bool fp_return) {
- Redirection* redirection = Redirection::Get(external_function, fp_return);
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -1528,6 +1529,9 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg2,
int32_t arg3);
+// This signature supports direct calls into an API function's native
+// callback (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1550,9 +1554,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
- if (redirection->fp_return()) {
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+ if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1568,9 +1572,28 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
+ } else if (redirection->type() == ExternalReference::DIRECT_CALL) {
+ SimulatorRuntimeApiCall target =
+ reinterpret_cast<SimulatorRuntimeApiCall>(external);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF(
+ "Call to host function at %p args %08x",
+ FUNCTION_ADDR(target),
+ arg0);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ v8::Handle<v8::Value> result = target(arg0);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, (int32_t) *result);
} else {
- intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
+ // Builtin call.
+ ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -2539,7 +2562,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
(overflow_vfp_flag_ << 2) |
(div_zero_vfp_flag_ << 1) |
(inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_ << 22);
+ (FPSCR_rounding_mode_);
set_register(rt, fpscr);
}
} else if ((instr->VLValue() == 0x0) &&
@@ -2562,7 +2585,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
div_zero_vfp_flag_ = (rt_value >> 1) & 1;
inv_op_vfp_flag_ = (rt_value >> 0) & 1;
FPSCR_rounding_mode_ =
- static_cast<FPSCRRoundingModes>((rt_value >> 22) & 3);
+ static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
}
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2651,87 +2674,135 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
}
}
+bool get_inv_op_vfp_flag(VFPRoundingMode mode,
+ double val,
+ bool unsigned_) {
+ ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
+ double max_uint = static_cast<double>(0xffffffffu);
+ double max_int = static_cast<double>(kMaxInt);
+ double min_int = static_cast<double>(kMinInt);
+
+ // Check for NaN.
+ if (val != val) {
+ return true;
+ }
+
+ // Check for overflow. This code works because 32-bit integers can be
+ // exactly represented by IEEE-754 64-bit floating-point values.
+ switch (mode) {
+ case RN:
+ return unsigned_ ? (val >= (max_uint + 0.5)) ||
+ (val < -0.5)
+ : (val >= (max_int + 0.5)) ||
+ (val < (min_int - 0.5));
+
+ case RM:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val < 0)
+ : (val >= (max_int + 1.0)) ||
+ (val < min_int);
+
+ case RZ:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val <= -1)
+ : (val >= (max_int + 1.0)) ||
+ (val <= (min_int - 1.0));
+ default:
+ UNREACHABLE();
+ return true;
+ }
+}
+
+
+// We call this function only if we had a vfp invalid exception.
+// It returns the correct saturated value.
+int VFPConversionSaturate(double val, bool unsigned_res) {
+ if (val != val) {
+ return 0;
+ } else {
+ if (unsigned_res) {
+ return (val < 0) ? 0 : 0xffffffffu;
+ } else {
+ return (val < 0) ? kMinInt : kMaxInt;
+ }
+ }
+}
+
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
+ (instr->Bits(27, 23) == 0x1D));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
bool to_integer = (instr->Bit(18) == 1);
- VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- src_precision = kDoublePrecision;
- }
+ VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
+ : kSinglePrecision;
if (to_integer) {
- bool unsigned_integer = (instr->Bit(16) == 0);
- FPSCRRoundingModes mode;
- if (instr->Bit(7) != 1) {
- // Use FPSCR defined rounding mode.
- mode = FPSCR_rounding_mode_;
- // Only RZ and RM modes are supported.
- ASSERT((mode == RM) || (mode == RZ));
- } else {
- // VFP uses round towards zero by default.
- mode = RZ;
- }
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note:
+ // C++ defines default type casting from floating point to integer as
+ // (close to) rounding toward zero ("fractional part discarded").
int dst = instr->VFPDRegValue(kSinglePrecision);
int src = instr->VFPMRegValue(src_precision);
- int32_t kMaxInt = v8::internal::kMaxInt;
- int32_t kMinInt = v8::internal::kMinInt;
- switch (mode) {
- case RM:
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+ // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
+ // mode or the default Round to Zero mode.
+ VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
+ : RZ;
+ ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- sint = sint > val ? sint - 1 : sint;
+ bool unsigned_integer = (instr->Bit(16) == 0);
+ bool double_precision = (src_precision == kDoublePrecision);
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
+ double val = double_precision ? get_double_from_d_register(src)
+ : get_float_from_s_register(src);
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+ int temp = unsigned_integer ? static_cast<uint32_t>(val)
+ : static_cast<int32_t>(val);
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- sint = sint > val ? sint - 1 : sint;
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
- set_s_register_from_sinteger(dst, sint);
+ if (inv_op_vfp_flag_) {
+ temp = VFPConversionSaturate(val, unsigned_integer);
+ } else {
+ switch (mode) {
+ case RN: {
+ double abs_diff =
+ unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
+ : fabs(val - temp);
+ int val_sign = (val > 0) ? 1 : -1;
+ if (abs_diff > 0.5) {
+ temp += val_sign;
+ } else if (abs_diff == 0.5) {
+ // Round to even if exactly halfway.
+ temp = ((temp % 2) == 0) ? temp : temp + val_sign;
+ }
+ break;
}
- break;
- case RZ:
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
+ case RM:
+ temp = temp > val ? temp - 1 : temp;
+ break;
- set_s_register_from_sinteger(dst, sint);
- }
- break;
+ case RZ:
+ // Nothing to do.
+ break;
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
}
+ // Update the destination register.
+ set_s_register_from_sinteger(dst, temp);
+
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
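
The RN case above builds round-half-to-even on top of C++'s truncating cast: truncate, measure the distance to the true value, then adjust by the sign, breaking exact ties toward the even integer. Extracted as a standalone function:

    #include <cmath>

    int RoundToNearestEven(double val) {
      int temp = static_cast<int>(val);      // C++ casts truncate toward zero
      double abs_diff = std::fabs(val - temp);
      int val_sign = (val > 0) ? 1 : -1;
      if (abs_diff > 0.5) {
        temp += val_sign;                    // clearly closer to the next int
      } else if (abs_diff == 0.5) {
        // Exactly halfway: round to the even neighbour, e.g. 2.5 -> 2, 3.5 -> 4.
        temp = ((temp % 2) == 0) ? temp : temp + val_sign;
      }
      return temp;
    }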
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index be44766d54..5256ae35b9 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -79,6 +79,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h"
#include "hashmap.h"
+#include "assembler.h"
namespace v8 {
namespace internal {
@@ -285,8 +286,9 @@ class Simulator {
static CachePage* GetCachePage(void* page);
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
- bool fp_return);
+ static void* RedirectExternalReference(
+ void* external_function,
+ v8::internal::ExternalReference::Type type);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
@@ -312,7 +314,7 @@ class Simulator {
bool v_flag_FPSCR_;
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
- FPSCRRoundingModes FPSCR_rounding_mode_;
+ VFPRoundingMode FPSCR_rounding_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 1e99e60694..9ef61158ea 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -575,72 +575,94 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ CallStub(&stub);
}
+static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to the fast API call in the
// caller's frame.
//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
Register scratch) {
__ mov(scratch, Operand(Smi::FromInt(0)));
- __ push(scratch);
- __ push(scratch);
- __ push(scratch);
- __ push(scratch);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ push(scratch);
+ }
}
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(4);
+ __ Drop(kFastApiCallArguments);
}
-// Generates call to FastHandleApiCall builtin.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : holder (set by CheckPrototypes)
+ // -- sp[4] : callee js function
+ // -- sp[8] : call data
+ // -- sp[12] : last js argument
+ // -- ...
+ // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 4) * 4] : receiver
+ // -----------------------------------
// Get the function and setup the context.
JSFunction* function = optimization.constant_function();
__ mov(r5, Operand(Handle<JSFunction>(function)));
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments the fast API call expects.
- bool info_loaded = false;
- Object* callback = optimization.api_call_info()->callback();
- if (Heap::InNewSpace(callback)) {
- info_loaded = true;
- __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
- __ ldr(r7, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
- } else {
- __ Move(r7, Handle<Object>(callback));
- }
Object* call_data = optimization.api_call_info()->data();
+ Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
if (Heap::InNewSpace(call_data)) {
- if (!info_loaded) {
- __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
- }
+ __ Move(r0, api_call_info_handle);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
__ Move(r6, Handle<Object>(call_data));
}
+ // Store js function and call data.
+ __ stm(ib, sp, r5.bit() | r6.bit());
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
- __ sub(sp, sp, Operand(1 * kPointerSize));
-
- // Set the number of arguments.
- __ mov(r0, Operand(argc + 4));
+ // r2 points to call data as expected by Arguments
+ // (refer to layout above).
+ __ add(r2, sp, Operand(2 * kPointerSize));
- // Jump to the fast api call builtin (tail call).
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::FastHandleApiCall));
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ Object* callback = optimization.api_call_info()->callback();
+ Address api_function_address = v8::ToCData<Address>(callback);
+ ApiFunction fun(api_function_address);
+
+ const int kApiStackSpace = 4;
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // r0 = v8::Arguments&
+ // Arguments is after the return address.
+ __ add(r0, sp, Operand(1 * kPointerSize));
+ // v8::Arguments::implicit_args = data
+ __ str(r2, MemOperand(r0, 0 * kPointerSize));
+ // v8::Arguments::values = last argument
+ __ add(ip, r2, Operand(argc * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // v8::Arguments::length_ = argc
+ __ mov(ip, Operand(argc));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // v8::Arguments::is_construct_call = 0
+ __ mov(ip, Operand(0));
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ MaybeObject* result = masm->TryCallApiFunctionAndReturn(
+ &fun, argc + kFastApiCallArguments + 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+ return Heap::undefined_value();
}
-
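
The four stores above materialize a v8::Arguments object directly on the exit frame. Written out as a struct, the block r0 points at looks roughly like this (field names are illustrative; compare v8::Arguments in v8.h):

    struct SimulatedArgumentsBlock {
      void** implicit_args;      // -> call data block (holder, callee, data)
      void** values;             // -> last JS argument; earlier args above it
      int length;                // argc
      int is_construct_call;     // 0: a plain call, not a construct call
    };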
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -650,16 +672,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_(arguments),
name_(name) {}
- void Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -669,17 +691,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CallOptimization optimization(lookup);
if (optimization.is_constant_call()) {
- CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ return CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
} else {
CompileRegular(masm,
object,
@@ -690,21 +712,22 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
+ return Heap::undefined_value();
}
}
private:
- void CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -768,7 +791,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ MaybeObject* result = GenerateFastApiDirectCall(masm,
+ optimization,
+ arguments_.immediate());
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -786,6 +812,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
+
+ return Heap::undefined_value();
}
void CompileRegular(MacroAssembler* masm,
@@ -2055,11 +2083,11 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// - Make sure Flush-to-zero mode control bit is unset (bit 22).
__ bic(r9, r3,
Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
+ __ orr(r9, r9, Operand(kRoundToMinusInf));
__ vmsr(r9);
// Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
// Use vcvt latency to start checking for special cases.
// Get the argument exponent and clear the sign bit.
@@ -2368,7 +2396,8 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
if (depth != kInvalidProtoDepth) {
- GenerateFastApiCall(masm(), optimization, argc);
+ MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@@ -2412,16 +2441,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), r2);
- compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- r1,
- r3,
- r4,
- r0,
- &miss);
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ r1,
+ r3,
+ r4,
+ r0,
+ &miss);
+ if (result->IsFailure()) {
+ return result;
+ }
// Move returned value, the function to call, to r1.
__ mov(r1, r0);
@@ -3087,6 +3119,38 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ r1,
+ r0,
+ r2,
+ r3,
+ r4,
+ r5,
+ r0,
+ &miss,
+ &miss,
+ &miss);
+
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -3764,9 +3828,9 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
+ __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
} else {
- __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
+ __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
}
__ vmov(r5, s0, ne);
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index a0e3e0bb4e..1298434d59 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -1018,9 +1018,11 @@ function ArrayIndexOf(element, index) {
} else {
index = TO_INTEGER(index);
// If index is negative, index from the end of the array.
- if (index < 0) index = length + index;
- // If index is still negative, search the entire array.
- if (index < 0) index = 0;
+ if (index < 0) {
+ index = length + index;
+ // If index is still negative, search the entire array.
+ if (index < 0) index = 0;
+ }
}
var min = index;
var max = length;
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index ca72d63992..ef2094f63a 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -553,8 +553,9 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id)
: address_(Redirect(Builtins::c_function_address(id))) {}
-ExternalReference::ExternalReference(ApiFunction* fun)
- : address_(Redirect(fun->address())) {}
+ExternalReference::ExternalReference(ApiFunction* fun, Type type)
+ : address_(Redirect(fun->address(), type)) {}
ExternalReference::ExternalReference(Builtins::Name name)
@@ -888,17 +889,18 @@ ExternalReference ExternalReference::double_fp_operation(
UNREACHABLE();
}
// Passing FP_RETURN_CALL indicates that the function returns an fp value.
- return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
+ return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
}
ExternalReference ExternalReference::compare_doubles() {
return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
- false));
+ BUILTIN_CALL));
}
-ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+ExternalReference::ExternalReferenceRedirector*
+ ExternalReference::redirector_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index a29aa064b8..e8bc5d6caa 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -459,9 +459,6 @@ class Debug_Address;
#endif
-typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
-
-
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
// an ExternalReference instance. This is done in order to track the origin of
@@ -469,9 +466,29 @@ typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
// addresses when deserializing a heap.
class ExternalReference BASE_EMBEDDED {
public:
+ // Used in the simulator to support different native api calls.
+ //
+ // BUILTIN_CALL - builtin call.
+ // MaybeObject* f(v8::internal::Arguments).
+ //
+ // FP_RETURN_CALL - builtin call that returns floating point.
+ // double f(double, double).
+ //
+ // DIRECT_CALL - direct call to API function native callback
+ // from generated code.
+ // Handle<Value> f(v8::Arguments&)
+ //
+ enum Type {
+ BUILTIN_CALL, // default
+ FP_RETURN_CALL,
+ DIRECT_CALL
+ };
+
+ typedef void* ExternalReferenceRedirector(void* original, Type type);
+
explicit ExternalReference(Builtins::CFunctionId id);
- explicit ExternalReference(ApiFunction* ptr);
+ explicit ExternalReference(ApiFunction* ptr, Type type);
explicit ExternalReference(Builtins::Name name);
@@ -599,17 +616,19 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReferenceRedirector* redirector_;
- static void* Redirect(void* address, bool fp_return = false) {
+ static void* Redirect(void* address,
+ Type type = ExternalReference::BUILTIN_CALL) {
if (redirector_ == NULL) return address;
- void* answer = (*redirector_)(address, fp_return);
+ void* answer = (*redirector_)(address, type);
return answer;
}
- static void* Redirect(Address address_arg, bool fp_return = false) {
+ static void* Redirect(Address address_arg,
+ Type type = ExternalReference::BUILTIN_CALL) {
void* address = reinterpret_cast<void*>(address_arg);
void* answer = (redirector_ == NULL) ?
address :
- (*redirector_)(address, fp_return);
+ (*redirector_)(address, type);
return answer;
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index f80c89b8f9..0d0e37ffac 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -75,7 +75,8 @@ namespace internal {
V(GetProperty) \
V(SetProperty) \
V(InvokeBuiltin) \
- V(RegExpCEntry)
+ V(RegExpCEntry) \
+ V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
#endif
diff --git a/deps/v8/src/codegen-inl.h b/deps/v8/src/codegen-inl.h
index 6534e7fd63..54677894cd 100644
--- a/deps/v8/src/codegen-inl.h
+++ b/deps/v8/src/codegen-inl.h
@@ -55,6 +55,10 @@ bool CodeGenerator::is_eval() { return info_->is_eval(); }
Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+StrictModeFlag CodeGenerator::strict_mode_flag() {
+ return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode;
+}
+
} } // namespace v8::internal
#endif // V8_CODEGEN_INL_H_
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 38438cb913..cccb7a4f21 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -136,7 +136,8 @@ class CompilationCacheEval: public CompilationSubCache {
: CompilationSubCache(generations) { }
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context);
+ Handle<Context> context,
+ StrictModeFlag strict_mode);
void Put(Handle<String> source,
Handle<Context> context,
@@ -371,7 +372,9 @@ void CompilationCacheScript::Put(Handle<String> source,
Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
- Handle<String> source, Handle<Context> context) {
+ Handle<String> source,
+ Handle<Context> context,
+ StrictModeFlag strict_mode) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
@@ -380,7 +383,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
{ HandleScope scope;
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(*source, *context);
+ result = table->LookupEval(*source, *context, strict_mode);
if (result->IsSharedFunctionInfo()) {
break;
}
@@ -503,18 +506,20 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
}
-Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(
+ Handle<String> source,
+ Handle<Context> context,
+ bool is_global,
+ StrictModeFlag strict_mode) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
Handle<SharedFunctionInfo> result;
if (is_global) {
- result = eval_global.Lookup(source, context);
+ result = eval_global.Lookup(source, context, strict_mode);
} else {
- result = eval_contextual.Lookup(source, context);
+ result = eval_contextual.Lookup(source, context, strict_mode);
}
return result;
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 37e21be99d..f779a23aac 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -51,7 +51,8 @@ class CompilationCache {
// contain a script for the given source string.
static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
- bool is_global);
+ bool is_global,
+ StrictModeFlag strict_mode);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 5c18c3e53e..77111a842e 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -548,7 +548,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
- bool is_global) {
+ bool is_global,
+ StrictModeFlag strict_mode) {
int source_length = source->length();
Counters::total_eval_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
@@ -559,7 +560,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
// Do a lookup in the compilation cache; if the entry is not there, invoke
// the compiler and add the result to the cache.
Handle<SharedFunctionInfo> result;
- result = CompilationCache::LookupEval(source, context, is_global);
+ result = CompilationCache::LookupEval(source,
+ context,
+ is_global,
+ strict_mode);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
@@ -567,9 +571,14 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
+ if (strict_mode == kStrictMode) info.MarkAsStrict();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
+ // If caller is strict mode, the result must be strict as well,
+ // but not the other way around. Consider:
+ // eval("'use strict'; ...");
+ ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
CompilationCache::PutEval(source, context, is_global, result);
}
}
@@ -762,6 +771,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
*lit->this_property_assignments());
function_info->set_try_full_codegen(lit->try_full_codegen());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_strict_mode(lit->strict_mode());
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 44ac9c85ce..9843dd6452 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -49,6 +49,7 @@ class CompilationInfo BASE_EMBEDDED {
bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
+ bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@@ -69,6 +70,13 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
+ void MarkAsStrict() {
+ ASSERT(!is_lazy());
+ flags_ |= IsStrict::encode(true);
+ }
+ StrictModeFlag StrictMode() {
+ return is_strict() ? kStrictMode : kNonStrictMode;
+ }
void MarkAsInLoop() {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
@@ -162,6 +170,8 @@ class CompilationInfo BASE_EMBEDDED {
class IsGlobal: public BitField<bool, 2, 1> {};
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
+ // Strict mode - used in eager compilation.
+ class IsStrict: public BitField<bool, 4, 1> {};
unsigned flags_;
@@ -230,7 +240,8 @@ class Compiler : public AllStatic {
// Compile a String source within a context for Eval.
static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
Handle<Context> context,
- bool is_global);
+ bool is_global,
+ StrictModeFlag strict_mode);
// Compile from function info (used for lazy compilation). Returns true on
// success and false if the compilation resulted in a stack overflow.
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index a954d6cc69..a348235d66 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -125,8 +125,8 @@ static bool isDigit(int x, int radix) {
}
-static double SignedZero(bool sign) {
- return sign ? -0.0 : 0.0;
+static double SignedZero(bool negative) {
+ return negative ? -0.0 : 0.0;
}
@@ -134,14 +134,14 @@ static double SignedZero(bool sign) {
template <int radix_log_2, class Iterator, class EndMark>
static double InternalStringToIntDouble(Iterator current,
EndMark end,
- bool sign,
+ bool negative,
bool allow_trailing_junk) {
ASSERT(current != end);
// Skip leading 0s.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
}
int64_t number = 0;
@@ -217,7 +217,7 @@ static double InternalStringToIntDouble(Iterator current,
ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
if (exponent == 0) {
- if (sign) {
+ if (negative) {
if (number == 0) return -0.0;
number = -number;
}
@@ -227,7 +227,7 @@ static double InternalStringToIntDouble(Iterator current,
ASSERT(number != 0);
// The double could be constructed faster from number (mantissa), exponent
// and sign. Assuming it's a rare case, simpler code is used.
- return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
+ return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
}
@@ -238,7 +238,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
if (!AdvanceToNonspace(&current, end)) return empty_string_val;
- bool sign = false;
+ bool negative = false;
bool leading_zero = false;
if (*current == '+') {
@@ -248,14 +248,14 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
} else if (*current == '-') {
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
- sign = true;
+ negative = true;
}
if (radix == 0) {
// Radix detection.
if (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
if (*current == 'x' || *current == 'X') {
radix = 16;
++current;
@@ -271,7 +271,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
if (*current == '0') {
// Allow "0x" prefix.
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
if (*current == 'x' || *current == 'X') {
++current;
if (current == end) return JUNK_STRING_VALUE;
@@ -287,7 +287,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
while (*current == '0') {
leading_zero = true;
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
}
if (!leading_zero && !isDigit(*current, radix)) {
@@ -298,21 +298,21 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 4:
return InternalStringToIntDouble<2>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 8:
return InternalStringToIntDouble<3>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 16:
return InternalStringToIntDouble<4>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 32:
return InternalStringToIntDouble<5>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
default:
UNREACHABLE();
}
@@ -344,7 +344,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
- return sign ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
+ return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
}
// The following code causes accumulating rounding error for numbers greater
@@ -406,7 +406,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
return JUNK_STRING_VALUE;
}
- return sign ? -v : v;
+ return negative ? -v : v;
}
@@ -445,7 +445,7 @@ static double InternalStringToDouble(Iterator current,
bool nonzero_digit_dropped = false;
bool fractional_part = false;
- bool sign = false;
+ bool negative = false;
if (*current == '+') {
// Ignore leading sign.
@@ -454,7 +454,7 @@ static double InternalStringToDouble(Iterator current,
} else if (*current == '-') {
++current;
if (current == end) return JUNK_STRING_VALUE;
- sign = true;
+ negative = true;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -468,13 +468,13 @@ static double InternalStringToDouble(Iterator current,
}
ASSERT(buffer_pos == 0);
- return sign ? -V8_INFINITY : V8_INFINITY;
+ return negative ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
leading_zero = true;
@@ -487,14 +487,14 @@ static double InternalStringToDouble(Iterator current,
return InternalStringToIntDouble<4>(current,
end,
- sign,
+ negative,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
}
}
@@ -539,7 +539,7 @@ static double InternalStringToDouble(Iterator current,
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
exponent--; // Move this 0 into the exponent.
}
}
@@ -631,7 +631,7 @@ static double InternalStringToDouble(Iterator current,
if (octal) {
return InternalStringToIntDouble<3>(buffer,
buffer + buffer_pos,
- sign,
+ negative,
allow_trailing_junk);
}
@@ -644,7 +644,7 @@ static double InternalStringToDouble(Iterator current,
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return sign ? -converted : converted;
+ return negative ? -converted : converted;
}
@@ -702,26 +702,12 @@ double StringToDouble(Vector<const char> str,
const char* DoubleToCString(double v, Vector<char> buffer) {
- StringBuilder builder(buffer.start(), buffer.length());
-
switch (fpclassify(v)) {
- case FP_NAN:
- builder.AddString("NaN");
- break;
-
- case FP_INFINITE:
- if (v < 0.0) {
- builder.AddString("-Infinity");
- } else {
- builder.AddString("Infinity");
- }
- break;
-
- case FP_ZERO:
- builder.AddCharacter('0');
- break;
-
+ case FP_NAN: return "NaN";
+ case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
+ case FP_ZERO: return "0";
default: {
+ StringBuilder builder(buffer.start(), buffer.length());
int decimal_point;
int sign;
const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -764,9 +750,9 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
if (exponent < 0) exponent = -exponent;
builder.AddFormatted("%d", exponent);
}
+ return builder.Finalize();
}
}
- return builder.Finalize();
}
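
One subtlety of the DoubleToCString rewrite above: NaN, infinities, and zero now return string literals, so the result is no longer guaranteed to point into the caller's buffer. A minimal usage sketch (buffer size chosen arbitrarily here):

    char buffer[64];
    double zero = 0.0;
    const char* str = DoubleToCString(zero / zero, Vector<char>(buffer, 64));
    // str == "NaN" and buffer may be untouched; callers must use the
    // returned pointer rather than assuming it aliases buffer.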
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index f081576aa8..00e7d0ee2c 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -817,7 +817,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
// call to an unconditional call to the replacement code.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_start();
+ unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
stack_check_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
@@ -836,7 +836,7 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
// stack check calls.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_start();
+ unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
stack_check_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 194a299f02..243abf079c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -313,12 +313,12 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
- ? static_cast<int>(code->safepoint_table_start())
+ ? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+ Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
}
byte* begin = code->instruction_start();
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index b8f081c54d..63daa05b5b 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -40,8 +40,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+ bool compact = false;
// All allocation spaces other than NEW_SPACE have the same effect.
- Heap::CollectAllGarbage(false);
+ if (args.Length() >= 1 && args[0]->IsBoolean()) {
+ compact = args[0]->BooleanValue();
+ }
+ Heap::CollectAllGarbage(compact);
return v8::Undefined();
}
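
With the change above, a compacting collection becomes opt-in from script (gc(true), where the extension is installed, e.g. under --expose-gc); both script paths reduce to the direct C++ calls:

    Heap::CollectAllGarbage(false);  // gc(): non-compacting collection
    Heap::CollectAllGarbage(true);   // gc(true): force compaction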
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index af3ac00bae..4ed3fecfe8 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -304,7 +304,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_allow_osr_at_loop_nesting_level(0);
- code->set_stack_check_table_start(table_offset);
+ code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 0482ee8d94..2d0998d8cf 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -531,6 +531,9 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
+ StrictModeFlag strict_mode_flag() {
+ return function()->strict_mode() ? kStrictMode : kNonStrictMode;
+ }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 461c3f5fab..274c34ddeb 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -873,7 +873,7 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
int expected_additional_properties,
bool condition) {
object_ = object;
- if (condition && object_->HasFastProperties()) {
+ if (condition && object_->HasFastProperties() && !object->IsJSGlobalProxy()) {
// Normalize the properties of object to avoid n^2 behavior
// when extending the object with multiple properties. Indicate the number of
// properties to be added.
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index dfda7c6fec..732d2f4102 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -373,6 +373,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
+ Heap::CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
@@ -808,7 +809,7 @@ void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CollectStats(obj);
agg_snapshot_->js_cons_profile()->CollectStats(obj);
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 0093829b1c..0e3a2b870e 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -1943,6 +1943,14 @@ void Heap::CreateJSConstructEntryStub() {
}
+#if V8_TARGET_ARCH_ARM
+void Heap::CreateDirectCEntryStub() {
+ DirectCEntryStub stub;
+ set_direct_c_entry_code(*stub.GetCode());
+}
+#endif
+
+
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
// for cooking and uncooking (check out frames.cc).
@@ -1963,6 +1971,9 @@ void Heap::CreateFixedStubs() {
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Heap::CreateRegExpCEntryStub();
#endif
+#if V8_TARGET_ARCH_ARM
+ Heap::CreateDirectCEntryStub();
+#endif
}
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index e051b50235..dcd813b774 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -122,7 +122,12 @@ namespace internal {
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
UNCONDITIONAL_STRONG_ROOT_LIST(V) \
- V(Code, re_c_entry_code, RegExpCEntryCode)
+ V(Code, re_c_entry_code, RegExpCEntryCode) \
+ V(Code, direct_c_entry_code, DirectCEntryCode)
+#elif V8_TARGET_ARCH_ARM
+#define STRONG_ROOT_LIST(V) \
+ UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+ V(Code, direct_c_entry_code, DirectCEntryCode)
#else
#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
#endif
@@ -178,6 +183,7 @@ namespace internal {
V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
+ V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \
@@ -1319,12 +1325,13 @@ class Heap : public AllStatic {
static bool CreateInitialMaps();
static bool CreateInitialObjects();
- // These four Create*EntryStub functions are here and forced to not be inlined
+ // These five Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
NO_INLINE(static void CreateCEntryStub());
NO_INLINE(static void CreateJSEntryStub());
NO_INLINE(static void CreateJSConstructEntryStub());
NO_INLINE(static void CreateRegExpCEntryStub());
+ NO_INLINE(static void CreateDirectCEntryStub());
static void CreateFixedStubs();
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index c24ddfc80c..0ff41ba238 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -1193,7 +1193,15 @@ void HStoreGlobal::PrintDataTo(StringStream* stream) const {
void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ value()->PrintNameTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
+ context()->PrintNameTo(stream);
+ stream->Add("[%d] = ", slot_index());
+ value()->PrintNameTo(stream);
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index eaab8adef2..f1093a004c 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -98,6 +98,7 @@ class LChunkBuilder;
V(CompareJSObjectEq) \
V(CompareMap) \
V(Constant) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
@@ -129,6 +130,7 @@ class LChunkBuilder;
V(Mul) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -139,6 +141,7 @@ class LChunkBuilder;
V(Shr) \
V(Simulate) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
@@ -163,6 +166,7 @@ class LChunkBuilder;
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
+ V(ContextSlots) \
V(OsrEntries)
#define DECLARE_INSTRUCTION(type) \
@@ -1060,12 +1064,39 @@ class HPushArgument: public HUnaryOperation {
};
-class HGlobalObject: public HInstruction {
+class HContext: public HInstruction {
public:
- HGlobalObject() {
+ HContext() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HOuterContext: public HUnaryOperation {
+ public:
+ explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HGlobalObject: public HUnaryOperation {
+ public:
+ explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
@@ -1075,12 +1106,12 @@ class HGlobalObject: public HInstruction {
};
-class HGlobalReceiver: public HInstruction {
+class HGlobalReceiver: public HUnaryOperation {
public:
- HGlobalReceiver() {
+ explicit HGlobalReceiver(HValue* global_object)
+ : HUnaryOperation(global_object) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
}
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
@@ -2613,35 +2644,66 @@ class HStoreGlobal: public HUnaryOperation {
};
-class HLoadContextSlot: public HInstruction {
+class HLoadContextSlot: public HUnaryOperation {
public:
- HLoadContextSlot(int context_chain_length, int slot_index)
- : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+ HLoadContextSlot(HValue* context, int slot_index)
+ : HUnaryOperation(context), slot_index_(slot_index) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
+ SetFlag(kDependsOnContextSlots);
}
- int context_chain_length() const { return context_chain_length_; }
int slot_index() const { return slot_index_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- virtual intptr_t Hashcode() const {
- return context_chain_length() * 29 + slot_index();
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) const;
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
protected:
virtual bool DataEquals(HValue* other) const {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
- return (context_chain_length() == b->context_chain_length())
- && (slot_index() == b->slot_index());
+ return (slot_index() == b->slot_index());
}
private:
- int context_chain_length_;
+ int slot_index_;
+};
+
+
+static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+ return !value->type().IsSmi() &&
+ !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+}
+
+
+class HStoreContextSlot: public HBinaryOperation {
+ public:
+ HStoreContextSlot(HValue* context, int slot_index, HValue* value)
+ : HBinaryOperation(context, value), slot_index_(slot_index) {
+ SetFlag(kChangesContextSlots);
+ }
+
+ HValue* context() const { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
+ int slot_index() const { return slot_index_; }
+
+ bool NeedsWriteBarrier() const {
+ return StoringValueNeedsWriteBarrier(value());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+
+ private:
int slot_index_;
};
@@ -2777,12 +2839,6 @@ class HLoadKeyedGeneric: public HLoadKeyed {
};
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsSmi() &&
- !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
class HStoreNamed: public HBinaryOperation {
public:
HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
@@ -2800,10 +2856,6 @@ class HStoreNamed: public HBinaryOperation {
HValue* value() const { return OperandAt(1); }
void set_value(HValue* value) { SetOperandAt(1, value); }
- bool NeedsWriteBarrier() const {
- return StoringValueNeedsWriteBarrier(value());
- }
-
DECLARE_INSTRUCTION(StoreNamed)
private:
@@ -2831,7 +2883,7 @@ class HStoreNamedField: public HStoreNamed {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
+ return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream) const;
@@ -2840,6 +2892,10 @@ class HStoreNamedField: public HStoreNamed {
Handle<Map> transition() const { return transition_; }
void set_transition(Handle<Map> map) { transition_ = map; }
+ bool NeedsWriteBarrier() const {
+ return StoringValueNeedsWriteBarrier(value());
+ }
+
private:
bool is_in_object_;
int offset_;
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 9d5aa2be31..4044f7fff1 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -2955,6 +2955,19 @@ void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
}
+HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+ ASSERT(var->IsContextSlot());
+ HInstruction* context = new HContext;
+ AddInstruction(context);
+ int length = graph()->info()->scope()->ContextChainLength(var->scope());
+ while (length-- > 0) {
+ context = new HOuterContext(context);
+ AddInstruction(context);
+ }
+ return context;
+}
+
+
void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Variable* variable = expr->AsVariable();
if (variable == NULL) {
@@ -2968,16 +2981,9 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (variable->mode() == Variable::CONST) {
BAILOUT("reference to const context slot");
}
- Slot* slot = variable->AsSlot();
- CompilationInfo* info = graph()->info();
- int context_chain_length = info->function()->scope()->
- ContextChainLength(slot->var()->scope());
- ASSERT(context_chain_length >= 0);
- // TODO(antonm): if slot's value is not modified by closures, instead
- // of reading it out of context, we could just embed the value as
- // a constant.
- HLoadContextSlot* instr =
- new HLoadContextSlot(context_chain_length, slot->index());
+ HValue* context = BuildContextChainWalk(variable);
+ int index = variable->AsSlot()->index();
+ HLoadContextSlot* instr = new HLoadContextSlot(context, index);
ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
@@ -3515,35 +3521,48 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
if (proxy->IsArguments()) BAILOUT("assignment to arguments");
// Handle the assignment.
- if (var->is_global()) {
+ if (var->IsStackAllocated()) {
+ HValue* value = NULL;
+ // Handle stack-allocated variables on the right-hand side directly.
+ // We do not allow the arguments object to occur in a context where it
+ // may escape, but assignments to stack-allocated locals are
+ // permitted. Handling such assignments here bypasses the check for
+ // the arguments object in VisitVariableProxy.
+ Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
+ if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
+ value = environment()->Lookup(rhs_var);
+ } else {
+ VISIT_FOR_VALUE(expr->value());
+ value = Pop();
+ }
+ Bind(var, value);
+ ast_context()->ReturnValue(value);
+
+ } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
+ VISIT_FOR_VALUE(expr->value());
+ HValue* context = BuildContextChainWalk(var);
+ int index = var->AsSlot()->index();
+ HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+ AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ast_context()->ReturnValue(Pop());
+
+ } else if (var->is_global()) {
VISIT_FOR_VALUE(expr->value());
HandleGlobalVariableAssignment(var,
Top(),
expr->position(),
expr->AssignmentId());
- } else if (var->IsStackAllocated()) {
- // We allow reference to the arguments object only in assignemtns
- // to local variables to make sure that the arguments object does
- // not escape and is not modified.
- VariableProxy* rhs = expr->value()->AsVariableProxy();
- if (rhs != NULL &&
- rhs->var()->IsStackAllocated() &&
- environment()->Lookup(rhs->var())->CheckFlag(HValue::kIsArguments)) {
- Push(environment()->Lookup(rhs->var()));
- } else {
- VISIT_FOR_VALUE(expr->value());
- }
- Bind(proxy->var(), Top());
+ ast_context()->ReturnValue(Pop());
+
} else {
- BAILOUT("Assigning to no non-stack-allocated/non-global variable");
+ BAILOUT("assignment to LOOKUP or const CONTEXT variable");
}
- // Return the value.
- ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
HandlePropertyAssignment(expr);
} else {
- BAILOUT("unsupported invalid lhs");
+ BAILOUT("invalid left-hand side in assignment");
}
}
@@ -4422,7 +4441,10 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (known_global_function) {
// Push the global object instead of the global receiver because
// code generated by the full code generator expects it.
- PushAndAdd(new HGlobalObject);
+ HContext* context = new HContext;
+ HGlobalObject* global_object = new HGlobalObject(context);
+ AddInstruction(context);
+ PushAndAdd(global_object);
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
@@ -4431,7 +4453,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
AddInstruction(new HCheckFunction(function, expr->target()));
// Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver = new HGlobalReceiver;
+ HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
// Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
AddInstruction(global_receiver);
@@ -4458,7 +4480,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
call = new HCallKnownGlobal(expr->target(), argument_count);
} else {
- PushAndAdd(new HGlobalObject);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ PushAndAdd(new HGlobalObject(context));
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
@@ -4466,7 +4490,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else {
- PushAndAdd(new HGlobalReceiver);
+ HContext* context = new HContext;
+ HGlobalObject* global_object = new HGlobalObject(context);
+ AddInstruction(context);
+ AddInstruction(global_object);
+ PushAndAdd(new HGlobalReceiver(global_object));
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 952c04fdec..0178f3f85b 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -823,6 +823,8 @@ class HGraphBuilder: public AstVisitor {
HValue* switch_value,
CaseClause* clause);
+ HValue* BuildContextChainWalk(Variable* var);
+
void AddCheckConstantFunction(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a9c3085bad..cfee9709b7 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -6511,6 +6511,54 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
+// Loads an indexed element from a pixel array.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // elements - is set to the receiver's elements if
+ // the receiver doesn't have a pixel array or the
+ // key is not a smi, otherwise it's the elements'
+ // external pointer.
+ // untagged_key - is set to the untagged key.
+
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, an explicit check is
+ // generated to ensure that the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ mov(untagged_key, key);
+ __ SmiUntag(untagged_key);
+
+ // Verify that the receiver has pixel array elements.
+ __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+ // Key must be in range.
+ __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Perform the indexed load and tag the result as a smi.
+ __ mov(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movzx_b(result, Operand(elements, untagged_key, times_1, 0));
+ __ SmiTag(result);
+ __ ret(0);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index cd18749b12..2064574ce8 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -490,6 +490,25 @@ class NumberToStringStub: public CodeStub {
};
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee this
+// precondition. If the receiver does not have elements that are pixel arrays,
+// the generated code jumps to not_pixel_array. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_
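
A usage sketch of the contract declared above, mirroring the KeyedLoadIC call site later in this patch (register assignments as used there):

    GenerateFastPixelArrayLoad(masm,
                               edx,  // receiver
                               eax,  // key, already known to be a smi here
                               ecx,  // elements (clobbered)
                               ebx,  // untagged_key
                               eax,  // result
                               &check_number_dictionary,  // not_pixel_array
                               NULL,   // key_not_smi: smi check already done
                               &slow); // out_of_range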
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 73c952a351..52de32b6f3 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -6102,9 +6102,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump(&result);
slow.Bind();
@@ -6121,8 +6124,11 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 46b12cbb07..27e339620c 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -365,6 +365,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 35c671365d..a646052eee 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -45,6 +45,16 @@ int Deoptimizer::patch_size() {
}
+static void ZapCodeRange(Address start, Address end) {
+#ifdef DEBUG
+ ASSERT(start <= end);
+ int size = end - start;
+ CodePatcher destroyer(start, size);
+ while (size-- > 0) destroyer.masm()->int3();
+#endif
+}
+
+
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
AssertNoAllocation no_allocation;
@@ -52,90 +62,61 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Get the optimized code.
Code* code = function->code();
-
- // For each return after a safepoint insert a absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
+ Address code_start_address = code->instruction_start();
// We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte array.
- // Later on we will align this at the start of the byte array and create
- // a trash byte array of the remaining space.
+ // is written backward. The relocation info is the payload of a byte
+ // array. Later on we will slide this to the start of the byte array and
+ // create a filler object in the remaining space.
ByteArray* reloc_info = code->relocation_info();
- Address end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
+ Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+ RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
+
+ // For each return after a safepoint, insert a call to the corresponding
+ // deoptimization entry. Since the call is a relative encoding, write new
+ // reloc info. We do not need any of the existing reloc info because the
+ // existing code will not be used again (we zap it in debug builds).
+ SafepointTable table(code);
+ Address prev_address = code_start_address;
+ for (unsigned i = 0; i < table.length(); ++i) {
+ Address curr_address = code_start_address + table.GetPcOffset(i);
+ ZapCodeRange(prev_address, curr_address);
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = pc_offset - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
-#endif
- last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- last_pc_offset += gap_code_size;
- Address call_pc = code->instruction_start() + last_pc_offset;
- CodePatcher patcher(call_pc, patch_size());
- Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
- patcher.masm()->call(entry, RelocInfo::NONE);
- last_pc_offset += patch_size();
- RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(entry));
+ // The gap code is needed to get to the state expected at the bailout.
+ curr_address += safepoint_entry.gap_code_size();
+
+ CodePatcher patcher(curr_address, patch_size());
+ Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+
+ // We use RUNTIME_ENTRY for deoptimization bailouts.
+ RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
+ RelocInfo::RUNTIME_ENTRY,
+ reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
+
+ curr_address += patch_size();
}
+ prev_address = curr_address;
}
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = code->safepoint_table_start() - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
-#endif
+ ZapCodeRange(prev_address,
+ code_start_address + code->safepoint_table_offset());
// Move the relocation info to the beginning of the byte array.
- int reloc_size = end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
+ int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+ memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
- reloc_info->set_length(reloc_size);
+ reloc_info->set_length(new_reloc_size);
// Handle the junk part after the new relocation info. We will create
// a non-live object in the extra space at the end of the former reloc info.
- Address junk = reloc_info->address() + reloc_info->Size();
- ASSERT(junk <= end_address);
-
- if (end_address - junk <= ByteArray::kHeaderSize) {
- // We get in here if there is not enough space for a ByteArray.
-
- // Both addresses are kPointerSize alligned.
- CHECK_EQ((end_address - junk) % 4, 0);
- Map* filler_map = Heap::one_pointer_filler_map();
- while (junk < end_address) {
- HeapObject::FromAddress(junk)->set_map(filler_map);
- junk += kPointerSize;
- }
- } else {
- int size = end_address - junk;
- // Since the reloc_end address and junk are both alligned, we shouild,
- // never have junk which is not a multipla of kPointerSize.
- CHECK_EQ(size % kPointerSize, 0);
- CHECK_GT(size, 0);
- HeapObject* junk_object = HeapObject::FromAddress(junk);
- junk_object->set_map(Heap::byte_array_map());
- int length = ByteArray::LengthFor(end_address - junk);
- ByteArray::cast(junk_object)->set_length(length);
- }
+ Address junk_address = reloc_info->address() + reloc_info->Size();
+ ASSERT(junk_address <= reloc_end_address);
+ Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
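
Heap::CreateFillerObjectAt subsumes the removed hand-rolled branches; a sketch mirroring what those branches did, under the same pointer-alignment assumptions (helper name is illustrative):

    static void FillHole(Address start, Address end) {
      int size = static_cast<int>(end - start);
      CHECK_EQ(size % kPointerSize, 0);
      if (size <= ByteArray::kHeaderSize) {
        // Too small for a ByteArray: pave the span with one-pointer fillers.
        Map* filler_map = Heap::one_pointer_filler_map();
        for (Address a = start; a < end; a += kPointerSize) {
          HeapObject::FromAddress(a)->set_map(filler_map);
        }
      } else {
        // Large enough: cover the hole with a single dead ByteArray.
        HeapObject* filler = HeapObject::FromAddress(start);
        filler->set_map(Heap::byte_array_map());
        ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
      }
    }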
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 3ef815912f..3c094a4e12 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -206,45 +206,48 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, ecx, ebx, edx);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ NearLabel ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, taken);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
EmitReturnSequence();
}
@@ -610,7 +613,7 @@ void FullCodeGenerator::Move(Slot* dst,
__ mov(location, src);
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ int offset = Context::SlotOffset(dst->index());
__ RecordWrite(scratch1, offset, src, scratch2);
}
}
@@ -666,10 +669,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
+ // Check that we're not inside a 'with'.
__ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
@@ -1124,8 +1128,11 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
// Check that last extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
- __ mov(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an esi-based operand (the write barrier cannot be allowed to
+ // destroy the esi register).
+ return ContextOperand(context, slot->index());
}
@@ -2000,57 +2007,75 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ __ mov(edx, Operand(ebp, SlotOffset(slot)));
+ __ cmp(edx, Factory::the_hole_value());
+ __ j(not_equal, &skip);
+ __ mov(Operand(ebp, SlotOffset(slot)), eax);
+ break;
+ case Slot::CONTEXT: {
+ __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
+ __ mov(edx, ContextOperand(ecx, slot->index()));
+ __ cmp(edx, Factory::the_hole_value());
+ __ j(not_equal, &skip);
+ __ mov(ContextOperand(ecx, slot->index()), eax);
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(edx, eax); // Preserve the stored value in eax.
+ __ RecordWrite(ecx, offset, edx, ebx);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(eax);
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ mov(edx, Operand(ebp, SlotOffset(slot)));
- __ cmp(edx, Factory::the_hole_value());
- __ j(not_equal, &done);
- }
// Perform the assignment.
__ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, ecx);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ mov(edx, target);
- __ cmp(edx, Factory::the_hole_value());
- __ j(not_equal, &done);
- }
// Perform the assignment and issue the write barrier.
__ mov(target, eax);
// The value of the assignment is in eax. RecordWrite clobbers its
// register arguments.
__ mov(edx, eax);
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ int offset = Context::SlotOffset(slot->index());
__ RecordWrite(ecx, offset, edx, ebx);
break;
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
- __ bind(&done);
}
}
@@ -2270,7 +2295,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index c1369774d8..3fe8fdb969 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -556,19 +556,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&check_pixel_array);
- // Check whether the elements is a pixel array.
- // edx: receiver
- // eax: key
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- __ mov(ebx, eax);
- __ SmiUntag(ebx);
- __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
- __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
- __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
- __ SmiTag(eax);
- __ ret(0);
+ GenerateFastPixelArrayLoad(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ eax,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index ae8fe8d65a..9e4bada4f4 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -77,7 +77,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -1914,10 +1914,21 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- // TODO(antonm): load a context with a separate instruction.
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadContext(result, instr->context_chain_length());
- __ mov(result, ContextOperand(result, instr->slot_index()));
+ __ mov(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ mov(ContextOperand(context, instr->slot_index()), value);
+ if (instr->needs_write_barrier()) {
+ Register temp = ToRegister(instr->TempAt(0));
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, offset, value, temp);
+ }
}
@@ -2142,6 +2153,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ASSERT(receiver.is(eax));
v8::internal::ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+
+ // Restore context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -2155,16 +2169,31 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, esi);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -3406,7 +3435,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = !instr->hydrogen()->pretenure();
+ bool pretenure = instr->hydrogen()->pretenure();
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ push(Immediate(shared_info));
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 2070367320..b31c4eba18 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "lithium-allocator-inl.h"
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -68,11 +69,35 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs can use either a fixed register or have a short lifetime (be
+ // used at start of the instruction).
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ LUnallocated::cast(operand)->IsUsedAtStart() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -268,7 +293,15 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
}
@@ -391,7 +424,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -407,7 +440,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -675,7 +707,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -700,7 +735,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -909,7 +944,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -932,11 +966,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -1140,13 +1170,26 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1658,7 +1701,25 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- return DefineAsRegister(new LLoadContextSlot);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ LOperand* temp;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ temp = TempRegister();
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ temp = NULL;
+ }
+ return new LStoreContextSlot(context, value, temp);
}
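On the codegen side, the temp and the UseTempRegister inputs exist for the write barrier: recording the write clobbers the registers it is given, so they may not hold values that live past the store. A sketch of the assumed codegen shape for this instruction (not copied from this patch; RecordWrite and ContextOperand are the existing ia32 macro-assembler helpers):

    void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
      Register context = ToRegister(instr->context());
      Register value = ToRegister(instr->value());
      __ mov(ContextOperand(context, instr->slot_index()), value);
      if (instr->needs_write_barrier()) {
        int offset = Context::SlotOffset(instr->slot_index());
        // Clobbers context, value and the scratch register.
        __ RecordWrite(context, offset, value, ToRegister(instr->TempAt(0)));
      }
    }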
@@ -1756,7 +1817,8 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not an in-object property).
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
- ? TempRegister() : NULL;
+ ? TempRegister()
+ : NULL;
return new LStoreNamedField(obj, val, temp);
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 1adc13e4d4..f643dac9f6 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -39,118 +39,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LStringCharCodeAt
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LStringLength
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
@@ -187,6 +75,8 @@ class LCodeGen;
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
@@ -197,16 +87,21 @@ class LCodeGen;
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(FixedArrayLength) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
@@ -218,22 +113,16 @@ class LCodeGen;
V(IsSmi) \
V(IsSmiAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -241,6 +130,7 @@ class LCodeGen;
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -250,6 +140,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
@@ -291,7 +182,10 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -308,15 +202,14 @@ class LInstruction: public ZoneObject {
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -331,11 +224,35 @@ class LInstruction: public ZoneObject {
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
@@ -362,6 +279,11 @@ class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
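The zero-length specialization keeps the operand interface uniform for instructions with no inputs or temps; operator[] is stubbed with UNREACHABLE() so the generic accessors can index unconditionally. For orientation, a sketch of the primary template as reconstructed from its uses in this patch (LStoreContextSlot below, for instance, is LTemplateInstruction<0, 2, 1>: no result, two inputs, one temp):

    // R = result count (0 or 1), I = input count, T = temp count.
    template<int R, int I, int T>
    class LTemplateInstruction: public LInstruction {
     public:
      virtual bool HasResult() const { return R != 0; }
      virtual LOperand* result() { return results_[0]; }
      virtual int InputCount() { return I; }
      virtual LOperand* InputAt(int i) { return inputs_[i]; }
      virtual int TempCount() { return T; }
      virtual LOperand* TempAt(int i) { return temps_[i]; }
     protected:
      OperandContainer<LOperand*, R> results_;
      OperandContainer<LOperand*, I> inputs_;
      OperandContainer<LOperand*, T> temps_;
    };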
@@ -1286,18 +1208,42 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@@ -1308,15 +1254,45 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
};
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
@@ -1815,7 +1791,7 @@ class LChunk: public ZoneObject {
pointer_maps_(8),
inlined_closures_(1) { }
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a4c9b11879..ee4e3d9cd6 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1523,11 +1523,21 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
}
- // The context may be an intermediate context, not a function context.
- mov(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // Slot is in the current function context.
- // The context may be an intermediate context, not a function context.
- mov(dst, Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in esi).
+ mov(dst, esi);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (FLAG_debug_code) {
+ cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ Check(equal, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts");
}
}
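Expressed as plain pointer chasing, the emitted code walks one closure/context pair per level and then, with --debug-code, asserts that the final context is a function context (its FCONTEXT slot points back at itself). A pseudo-C++ rendering under those assumptions (ContextSlot and Field are hypothetical helpers standing in for the operand arithmetic above):

    Object* ctx = esi_value;  // the current context
    for (int i = 0; i < context_chain_length; ++i) {
      Object* closure = ContextSlot(ctx, Context::CLOSURE_INDEX);
      ctx = Field(closure, JSFunction::kContextOffset);
    }
    dst = ctx;
    // Debug check: a function context's FCONTEXT slot is itself.
    ASSERT(ContextSlot(ctx, Context::FCONTEXT_INDEX) == ctx);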
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 6f180c6c2b..12a8923113 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -258,6 +258,17 @@ class MacroAssembler: public Assembler {
j(not_carry, is_smi);
}
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, not_taken);
+ }
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ test(value, Immediate(kSmiTagMask));
+ j(not_zero, not_smi_label, not_taken);
+ }
+
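Both helpers rely on the ia32 smi encoding: a smi stores its value shifted left by one with a zero tag bit, while heap object pointers carry a one bit, so a single test of the low bit classifies the word. A self-contained illustration with the constants inlined (kSmiTagSize == 1 and kSmiTagMask == 1 on ia32):

    #include <cassert>

    const long kSmiTag = 0;      // smis have a zero low bit
    const long kSmiTagMask = 1;  // heap object pointers have a one bit

    bool IsSmiWord(long word) { return (word & kSmiTagMask) == kSmiTag; }

    int main() {
      long smi_42 = 42 << 1;       // payload lives in the upper 31 bits
      long heap_ptr = 0x12345679;  // tagged pointers are odd
      assert(IsSmiWord(smi_42));
      assert(!IsSmiWord(heap_ptr));
      return 0;
    }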
// Assumes input is a heap object.
void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 6de9a410d1..f96ef5cefc 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -3155,6 +3155,37 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ edx,
+ eax,
+ ecx,
+ ebx,
+ eax,
+ &miss,
+ &miss,
+ &miss);
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
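GenerateFastPixelArrayLoad is a shared helper added elsewhere in this upgrade; in substance it verifies the key is a smi, bounds-checks it against the pixel array length, and loads one byte from the external pixel buffer, re-tagging the result as a smi. A small model of that fast path under those assumptions (a false return corresponds to a jump to the miss label):

    // Pixel elements are uint8 values, so the result always fits a smi.
    bool FastPixelLoad(const unsigned char* elems, unsigned length,
                       unsigned index, int* result) {
      if (index >= length) return false;  // bounds check -> miss
      *result = elems[index];             // one byte, zero-extended
      return true;
    }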
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 9a277d6c0c..8e282adc34 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -1204,23 +1204,31 @@ MaybeObject* KeyedLoadIC::Load(State state,
if (use_ic) {
Code* stub = generic_stub();
- if (object->IsString() && key->IsNumber()) {
- stub = string_stub();
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false);
- stub =
- probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (state == UNINITIALIZED &&
- key->IsSmi() &&
- receiver->map()->has_fast_elements()) {
- MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
- stub =
- probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
+ if (state == UNINITIALIZED) {
+ if (object->IsString() && key->IsNumber()) {
+ stub = string_stub();
+ } else if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->HasExternalArrayElements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
+ false);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (receiver->HasIndexedInterceptor()) {
+ stub = indexed_interceptor_stub();
+ } else if (receiver->HasPixelElements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadPixelArray(*receiver);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (key->IsSmi() &&
+ receiver->map()->has_fast_elements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadSpecialized(*receiver);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ }
}
}
if (stub != NULL) set_target(stub);
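The restructuring narrows specialization to the first transition out of UNINITIALIZED; once an IC has been specialized and misses again, it stays on the generic stub instead of re-probing the stub cache. A self-contained model of the resulting dispatch (receiver kinds reduced to an enum; the real code computes stubs through StubCache):

    enum ReceiverKind { STRING, EXTERNAL_ARRAY, INTERCEPTOR, PIXEL_ARRAY, FAST };

    const char* PickKeyedLoadStub(bool uninitialized, ReceiverKind kind,
                                  bool smi_key) {
      if (!uninitialized) return "generic";     // never re-specialize
      switch (kind) {
        case STRING:         return "string";
        case EXTERNAL_ARRAY: return "external-array";
        case INTERCEPTOR:    return "indexed-interceptor";
        case PIXEL_ARRAY:    return "pixel-array";   // new in this patch
        case FAST:           return smi_key ? "specialized" : "generic";
      }
      return "generic";
    }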
@@ -2053,6 +2061,8 @@ TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
}
if (left_type.IsInteger32() && right_type.IsInteger32()) {
+ // Platforms with 32-bit Smis have no distinct INT32 type.
+ if (kSmiValueSize == 32) return SMI;
return INT32;
}
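The difference is the smi payload width: ia32 packs smis into 31 bits, so an int32 result can overflow a smi while remaining representable, which is what the INT32 state captures; with 32-bit payloads (x64) every int32 is already a smi and INT32 would be dead. A worked check (kSmiValueSize is 31 on ia32 and 32 on x64):

    // 2^30 fits an int32 but overflows a 31-bit smi payload:
    //   31-bit smis: range [-2^30, 2^30 - 1] -> INT32 state is useful
    //   32-bit smis: range [-2^31, 2^31 - 1] -> every int32 fits, INT32 == SMI
    bool FitsSmi(long long v, int smi_value_size) {
      long long max = (1LL << (smi_value_size - 1)) - 1;
      long long min = -(1LL << (smi_value_size - 1));
      return v >= min && v <= max;
    }
    // FitsSmi(1LL << 30, 31) == false, FitsSmi(1LL << 30, 32) == true.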
@@ -2096,9 +2106,11 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
}
if (type == TRBinaryOpIC::SMI &&
previous_type == TRBinaryOpIC::SMI) {
- if (op == Token::DIV || op == Token::MUL) {
+ if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
+ // With 32-bit Smis, all overflows give heap numbers, but with
+ // 31-bit Smis, most operations overflow to int32 results.
result_type = TRBinaryOpIC::HEAP_NUMBER;
} else {
// Other operations on SMIs that overflow yield int32s.
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
new file mode 100644
index 0000000000..84c5bbdc69
--- /dev/null
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
+#define V8_LITHIUM_ALLOCATOR_INL_H_
+
+#include "lithium-allocator.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
+
+
+LInstruction* LAllocator::InstructionAt(int index) {
+ return chunk_->instructions()->at(index);
+}
+
+
+LGap* LAllocator::GapAt(int index) {
+ return chunk_->GetGapAt(index);
+}
+
+
+TempIterator::TempIterator(LInstruction* instr)
+ : instr_(instr),
+ limit_(instr->TempCount()),
+ current_(0) {
+ current_ = AdvanceToNext(0);
+}
+
+
+bool TempIterator::HasNext() { return current_ < limit_; }
+
+
+LOperand* TempIterator::Next() {
+ ASSERT(HasNext());
+ return instr_->TempAt(current_);
+}
+
+
+int TempIterator::AdvanceToNext(int start) {
+ while (start < limit_ && instr_->TempAt(start) == NULL) start++;
+ return start;
+}
+
+
+void TempIterator::Advance() {
+ current_ = AdvanceToNext(current_ + 1);
+}
+
+
+InputIterator::InputIterator(LInstruction* instr)
+ : instr_(instr),
+ limit_(instr->InputCount()),
+ current_(0) {
+ current_ = AdvanceToNext(0);
+}
+
+
+bool InputIterator::HasNext() { return current_ < limit_; }
+
+
+LOperand* InputIterator::Next() {
+ ASSERT(HasNext());
+ return instr_->InputAt(current_);
+}
+
+
+void InputIterator::Advance() {
+ current_ = AdvanceToNext(current_ + 1);
+}
+
+
+int InputIterator::AdvanceToNext(int start) {
+ while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
+ return start;
+}
+
+
+UseIterator::UseIterator(LInstruction* instr)
+ : input_iterator_(instr), env_iterator_(instr->environment()) { }
+
+
+bool UseIterator::HasNext() {
+ return input_iterator_.HasNext() || env_iterator_.HasNext();
+}
+
+
+LOperand* UseIterator::Next() {
+ ASSERT(HasNext());
+ return input_iterator_.HasNext()
+ ? input_iterator_.Next()
+ : env_iterator_.Next();
+}
+
+
+void UseIterator::Advance() {
+ input_iterator_.HasNext()
+ ? input_iterator_.Advance()
+ : env_iterator_.Advance();
+}
+
+} } // namespace v8::internal
+
+#endif // V8_LITHIUM_ALLOCATOR_INL_H_
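Taken together, the iterators give the allocator a uniform way to visit exactly the operands it must constrain: TempIterator skips NULL temps, InputIterator skips constant inputs, and UseIterator chains the inputs with every operand of the (possibly nested) environment. The traversal pattern, as used by MeetConstraintsBetween and ProcessInstructions later in this patch:

    // Visit all allocatable uses of an instruction:
    for (UseIterator it(instr); it.HasNext(); it.Advance()) {
      LUnallocated* use = LUnallocated::cast(it.Next());
      // ... apply input constraints ...
    }
    // Visit only the non-null temporaries:
    for (TempIterator it(instr); it.HasNext(); it.Advance()) {
      LOperand* temp = it.Next();
      // ... apply temp constraints ...
    }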
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 81877f3768..9f5f1b97d8 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "lithium-allocator.h"
+#include "lithium-allocator-inl.h"
#include "hydrogen.h"
#include "string-stream.h"
@@ -532,7 +532,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
- int block_count = graph()->blocks()->length();
+ int block_count = graph_->blocks()->length();
live_in_sets_.Initialize(block_count);
live_in_sets_.AddBlock(NULL, block_count);
}
@@ -613,7 +613,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
}
if (is_tagged) {
TraceAlloc("Fixed reg is tagged at %d\n", pos);
- LInstruction* instr = chunk_->instructions()->at(pos);
+ LInstruction* instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
instr->pointer_map()->RecordPointer(operand);
}
@@ -668,17 +668,17 @@ LiveRange* LAllocator::LiveRangeFor(int index) {
}
-LGap* LAllocator::GetLastGap(HBasicBlock* block) const {
+LGap* LAllocator::GetLastGap(HBasicBlock* block) {
int last_instruction = block->last_instruction_index();
int index = chunk_->NearestGapPos(last_instruction);
- return chunk_->GetGapAt(index);
+ return GapAt(index);
}
HPhi* LAllocator::LookupPhi(LOperand* operand) const {
if (!operand->IsUnallocated()) return NULL;
int index = operand->VirtualRegister();
- HValue* instr = graph()->LookupValue(index);
+ HValue* instr = graph_->LookupValue(index);
if (instr != NULL && instr->IsPhi()) {
return HPhi::cast(instr);
}
@@ -737,7 +737,7 @@ void LAllocator::Use(LifetimePosition block_start,
void LAllocator::AddConstraintsGapMove(int index,
LOperand* from,
LOperand* to) {
- LGap* gap = chunk_->GetGapAt(index);
+ LGap* gap = GapAt(index);
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
if (from->IsUnallocated()) {
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
@@ -760,24 +760,24 @@ void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
for (int i = start; i <= end; ++i) {
- if (chunk_->IsGapAt(i)) {
- InstructionSummary* summary = NULL;
- InstructionSummary* prev_summary = NULL;
- if (i < end) summary = GetSummary(i + 1);
- if (i > start) prev_summary = GetSummary(i - 1);
- MeetConstraintsBetween(prev_summary, summary, i);
+ if (IsGapAt(i)) {
+ LInstruction* instr = NULL;
+ LInstruction* prev_instr = NULL;
+ if (i < end) instr = InstructionAt(i + 1);
+ if (i > start) prev_instr = InstructionAt(i - 1);
+ MeetConstraintsBetween(prev_instr, instr, i);
}
}
}
-void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
- InstructionSummary* second,
+void LAllocator::MeetConstraintsBetween(LInstruction* first,
+ LInstruction* second,
int gap_index) {
// Handle fixed temporaries.
if (first != NULL) {
- for (int i = 0; i < first->TempCount(); ++i) {
- LUnallocated* temp = LUnallocated::cast(first->TempAt(i));
+ for (TempIterator it(first); it.HasNext(); it.Advance()) {
+ LUnallocated* temp = LUnallocated::cast(it.Next());
if (temp->HasFixedPolicy()) {
AllocateFixed(temp, gap_index - 1, false);
}
@@ -810,7 +810,7 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
// and splitting of live ranges do not account for it.
// Thus it should be inserted to a lifetime position corresponding to
// the instruction end.
- LGap* gap = chunk_->GetGapAt(gap_index);
+ LGap* gap = GapAt(gap_index);
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
move->AddMove(first_output, range->GetSpillOperand());
}
@@ -818,8 +818,8 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
// Handle fixed input operands of second instruction.
if (second != NULL) {
- for (int i = 0; i < second->InputCount(); ++i) {
- LUnallocated* cur_input = LUnallocated::cast(second->InputAt(i));
+ for (UseIterator it(second); it.HasNext(); it.Advance()) {
+ LUnallocated* cur_input = LUnallocated::cast(it.Next());
if (cur_input->HasFixedPolicy()) {
LUnallocated* input_copy = cur_input->CopyUnconstrained();
bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -848,7 +848,7 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
if (second != NULL && second->Output() != NULL) {
LUnallocated* second_output = LUnallocated::cast(second->Output());
if (second_output->HasSameAsInputPolicy()) {
- LUnallocated* cur_input = LUnallocated::cast(second->InputAt(0));
+ LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
int output_vreg = second_output->VirtualRegister();
int input_vreg = cur_input->VirtualRegister();
@@ -858,7 +858,7 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
int index = gap_index + 1;
- LInstruction* instr = chunk_->instructions()->at(index);
+ LInstruction* instr = InstructionAt(index);
if (instr->HasPointerMap()) {
instr->pointer_map()->RecordPointer(input_copy);
}
@@ -886,9 +886,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
LifetimePosition curr_position =
LifetimePosition::FromInstructionIndex(index);
- if (chunk_->IsGapAt(index)) {
+ if (IsGapAt(index)) {
// We have a gap at this position.
- LGap* gap = chunk_->GetGapAt(index);
+ LGap* gap = GapAt(index);
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
@@ -922,17 +922,17 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
} else {
- ASSERT(!chunk_->IsGapAt(index));
- InstructionSummary* summary = GetSummary(index);
+ ASSERT(!IsGapAt(index));
+ LInstruction* instr = InstructionAt(index);
- if (summary != NULL) {
- LOperand* output = summary->Output();
+ if (instr != NULL) {
+ LOperand* output = instr->Output();
if (output != NULL) {
if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
Define(curr_position, output, NULL);
}
- if (summary->IsCall()) {
+ if (instr->IsMarkedAsCall()) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
@@ -943,7 +943,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
- if (summary->IsCall() || summary->IsSaveDoubles()) {
+ if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
@@ -954,8 +954,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
- for (int i = 0; i < summary->InputCount(); ++i) {
- LOperand* input = summary->InputAt(i);
+ for (UseIterator it(instr); it.HasNext(); it.Advance()) {
+ LOperand* input = it.Next();
LifetimePosition use_pos;
if (input->IsUnallocated() &&
@@ -969,9 +969,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (input->IsUnallocated()) live->Add(input->VirtualRegister());
}
- for (int i = 0; i < summary->TempCount(); ++i) {
- LOperand* temp = summary->TempAt(i);
- if (summary->IsCall()) {
+ for (TempIterator it(instr); it.HasNext(); it.Advance()) {
+ LOperand* temp = it.Next();
+ if (instr->IsMarkedAsCall()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1042,9 +1042,9 @@ void LAllocator::Allocate(LChunk* chunk) {
void LAllocator::MeetRegisterConstraints() {
- HPhase phase("Register constraints", chunk());
+ HPhase phase("Register constraints", chunk_);
first_artificial_register_ = next_virtual_register_;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
HBasicBlock* block = blocks->at(i);
MeetRegisterConstraints(block);
@@ -1053,10 +1053,10 @@ void LAllocator::MeetRegisterConstraints() {
void LAllocator::ResolvePhis() {
- HPhase phase("Resolve phis", chunk());
+ HPhase phase("Resolve phis", chunk_);
// Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
HBasicBlock* block = blocks->at(block_id);
ResolvePhis(block);
@@ -1094,7 +1094,7 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
if (!pred_op->Equals(cur_op)) {
LGap* gap = NULL;
if (block->predecessors()->length() == 1) {
- gap = chunk_->GetGapAt(block->first_instruction_index());
+ gap = GapAt(block->first_instruction_index());
} else {
ASSERT(pred->end()->SecondSuccessor() == NULL);
gap = GetLastGap(pred);
@@ -1107,19 +1107,19 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
int index = pos.InstructionIndex();
- if (chunk_->IsGapAt(index)) {
- LGap* gap = chunk_->GetGapAt(index);
+ if (IsGapAt(index)) {
+ LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove(
pos.IsInstructionStart() ? LGap::START : LGap::END);
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return chunk_->GetGapAt(gap_pos)->GetOrCreateParallelMove(
+ return GapAt(gap_pos)->GetOrCreateParallelMove(
(gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
}
HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
- LGap* gap = chunk_->GetGapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
+ LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
return gap->block();
}
@@ -1166,7 +1166,7 @@ bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
void LAllocator::ResolveControlFlow() {
HPhase phase("Resolve control flow", this);
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = 1; block_id < blocks->length(); ++block_id) {
HBasicBlock* block = blocks->at(block_id);
if (CanEagerlyResolveControlFlow(block)) continue;
@@ -1189,7 +1189,7 @@ void LAllocator::BuildLiveRanges() {
HPhase phase("Build live ranges", this);
InitializeLivenessAnalysis();
// Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
HBasicBlock* block = blocks->at(block_id);
BitVector* live = ComputeLiveOut(block);
@@ -1264,7 +1264,7 @@ void LAllocator::BuildLiveRanges() {
found = true;
int operand_index = iterator.Current();
PrintF("Function: %s\n",
- *graph()->info()->function()->debug_name()->ToCString());
+ *graph_->info()->function()->debug_name()->ToCString());
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1469,7 +1469,7 @@ void LAllocator::AllocateRegisters() {
if (current->HasAllocatedSpillOperand()) {
TraceAlloc("Live range %d already has a spill operand\n", current->id());
LifetimePosition next_pos = position;
- if (chunk_->IsGapAt(next_pos.InstructionIndex())) {
+ if (IsGapAt(next_pos.InstructionIndex())) {
next_pos = next_pos.NextInstruction();
}
UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
@@ -1556,14 +1556,8 @@ void LAllocator::TraceAlloc(const char* msg, ...) {
}
-void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
- operand->set_virtual_register(value->id());
- current_summary()->AddInput(operand);
-}
-
-
bool LAllocator::HasTaggedValue(int virtual_register) const {
- HValue* value = graph()->LookupValue(virtual_register);
+ HValue* value = graph_->LookupValue(virtual_register);
if (value == NULL) return false;
return value->representation().IsTagged();
}
@@ -1571,7 +1565,7 @@ bool LAllocator::HasTaggedValue(int virtual_register) const {
RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
if (virtual_register < first_artificial_register_) {
- HValue* value = graph()->LookupValue(virtual_register);
+ HValue* value = graph_->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
}
@@ -1584,39 +1578,8 @@ RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
}
-void LAllocator::MarkAsCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs can use either fixed register or have a short lifetime (be
- // used at start of the instruction).
- InstructionSummary* summary = current_summary();
-#ifdef DEBUG
- ASSERT(summary->Output() == NULL ||
- LUnallocated::cast(summary->Output())->HasFixedPolicy() ||
- !LUnallocated::cast(summary->Output())->HasRegisterPolicy());
- for (int i = 0; i < summary->InputCount(); i++) {
- ASSERT(LUnallocated::cast(summary->InputAt(i))->HasFixedPolicy() ||
- LUnallocated::cast(summary->InputAt(i))->IsUsedAtStart() ||
- !LUnallocated::cast(summary->InputAt(i))->HasRegisterPolicy());
- }
- for (int i = 0; i < summary->TempCount(); i++) {
- ASSERT(LUnallocated::cast(summary->TempAt(i))->HasFixedPolicy() ||
- !LUnallocated::cast(summary->TempAt(i))->HasRegisterPolicy());
- }
-#endif
- summary->MarkAsCall();
-}
-
-
-void LAllocator::MarkAsSaveDoubles() {
- current_summary()->MarkAsSaveDoubles();
-}
-
-
void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
operand->set_virtual_register(instr->id());
- current_summary()->SetOutput(operand);
}
@@ -1625,40 +1588,16 @@ void LAllocator::RecordTemporary(LUnallocated* operand) {
if (!operand->HasFixedPolicy()) {
operand->set_virtual_register(next_virtual_register_++);
}
- current_summary()->AddTemp(operand);
-}
-
-
-int LAllocator::max_initial_value_ids() {
- return LUnallocated::kMaxVirtualRegisters / 32;
-}
-
-
-void LAllocator::BeginInstruction() {
- if (next_summary_ == NULL) {
- next_summary_ = new InstructionSummary();
- }
- summary_stack_.Add(next_summary_);
- next_summary_ = NULL;
}
-void LAllocator::SummarizeInstruction(int index) {
- InstructionSummary* sum = summary_stack_.RemoveLast();
- if (summaries_.length() <= index) {
- summaries_.AddBlock(NULL, index + 1 - summaries_.length());
- }
- ASSERT(summaries_[index] == NULL);
- if (sum->Output() != NULL || sum->InputCount() > 0 || sum->TempCount() > 0) {
- summaries_[index] = sum;
- } else {
- next_summary_ = sum;
- }
+void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
+ operand->set_virtual_register(value->id());
}
-void LAllocator::OmitInstruction() {
- summary_stack_.RemoveLast();
+int LAllocator::max_initial_value_ids() {
+ return LUnallocated::kMaxVirtualRegisters / 32;
}
@@ -2007,7 +1946,7 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
- chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
+ InstructionAt(pos.InstructionIndex())->IsLabel();
}
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 83f5583e3e..914a5b6868 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -31,6 +31,7 @@
#include "v8.h"
#include "data-flow.h"
+#include "lithium.h"
#include "zone.h"
namespace v8 {
@@ -153,52 +154,55 @@ enum RegisterKind {
// A register-allocator view of a Lithium instruction: the operand accessors
// on LInstruction together with the iterator classes declared below.
-class InstructionSummary: public ZoneObject {
+
+class LInstruction;
+class LEnvironment;
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
public:
- InstructionSummary()
- : output_operand_(NULL),
- input_count_(0),
- operands_(4),
- is_call_(false),
- is_save_doubles_(false) {}
-
- // Output operands.
- LOperand* Output() const { return output_operand_; }
- void SetOutput(LOperand* output) {
- ASSERT(output_operand_ == NULL);
- output_operand_ = output;
- }
+ inline explicit TempIterator(LInstruction* instr);
+ inline bool HasNext();
+ inline LOperand* Next();
+ inline void Advance();
- // Input operands.
- int InputCount() const { return input_count_; }
- LOperand* InputAt(int i) const {
- ASSERT(i < input_count_);
- return operands_[i];
- }
- void AddInput(LOperand* input) {
- operands_.InsertAt(input_count_, input);
- input_count_++;
- }
+ private:
+ inline int AdvanceToNext(int start);
+ LInstruction* instr_;
+ int limit_;
+ int current_;
+};
- // Temporary operands.
- int TempCount() const { return operands_.length() - input_count_; }
- LOperand* TempAt(int i) const { return operands_[i + input_count_]; }
- void AddTemp(LOperand* temp) { operands_.Add(temp); }
- void MarkAsCall() { is_call_ = true; }
- bool IsCall() const { return is_call_; }
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+ inline explicit InputIterator(LInstruction* instr);
+ inline bool HasNext();
+ inline LOperand* Next();
+ inline void Advance();
+
+ private:
+ inline int AdvanceToNext(int start);
+ LInstruction* instr_;
+ int limit_;
+ int current_;
+};
+
- void MarkAsSaveDoubles() { is_save_doubles_ = true; }
- bool IsSaveDoubles() const { return is_save_doubles_; }
+class UseIterator BASE_EMBEDDED {
+ public:
+ inline explicit UseIterator(LInstruction* instr);
+ inline bool HasNext();
+ inline LOperand* Next();
+ inline void Advance();
private:
- LOperand* output_operand_;
- int input_count_;
- ZoneList<LOperand*> operands_;
- bool is_call_;
- bool is_save_doubles_;
+ InputIterator input_iterator_;
+ DeepIterator env_iterator_;
};
+
// Representation of the non-empty interval [start,end[.
class UseInterval: public ZoneObject {
public:
@@ -428,9 +432,6 @@ class LAllocator BASE_EMBEDDED {
public:
explicit LAllocator(int first_virtual_register, HGraph* graph)
: chunk_(NULL),
- summaries_(0),
- next_summary_(NULL),
- summary_stack_(2),
live_in_sets_(0),
live_ranges_(16),
fixed_live_ranges_(8),
@@ -457,27 +458,12 @@ class LAllocator BASE_EMBEDDED {
// Record a temporary operand.
void RecordTemporary(LUnallocated* operand);
- // Marks the current instruction as a call.
- void MarkAsCall();
-
- // Marks the current instruction as requiring saving double registers.
- void MarkAsSaveDoubles();
-
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
- // Begin a new instruction.
- void BeginInstruction();
-
- // Summarize the current instruction.
- void SummarizeInstruction(int index);
-
- // Summarize the current instruction.
- void OmitInstruction();
-
// Control max function size.
static int max_initial_value_ids();
@@ -525,8 +511,8 @@ class LAllocator BASE_EMBEDDED {
void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
void ProcessInstructions(HBasicBlock* block, BitVector* live);
void MeetRegisterConstraints(HBasicBlock* block);
- void MeetConstraintsBetween(InstructionSummary* first,
- InstructionSummary* second,
+ void MeetConstraintsBetween(LInstruction* first,
+ LInstruction* second,
int gap_index);
void ResolvePhis(HBasicBlock* block);
@@ -604,12 +590,6 @@ class LAllocator BASE_EMBEDDED {
// Return the block which contains give lifetime position.
HBasicBlock* GetBlock(LifetimePosition pos);
- // Current active summary.
- InstructionSummary* current_summary() const { return summary_stack_.last(); }
-
- // Get summary for given instruction index.
- InstructionSummary* GetSummary(int index) const { return summaries_[index]; }
-
// Helper methods for the fixed registers.
int RegisterCount() const;
static int FixedLiveRangeID(int index) { return -index - 1; }
@@ -618,15 +598,17 @@ class LAllocator BASE_EMBEDDED {
LiveRange* FixedDoubleLiveRangeFor(int index);
LiveRange* LiveRangeFor(int index);
HPhi* LookupPhi(LOperand* operand) const;
- LGap* GetLastGap(HBasicBlock* block) const;
+ LGap* GetLastGap(HBasicBlock* block);
const char* RegisterName(int allocation_index);
- LChunk* chunk_;
- ZoneList<InstructionSummary*> summaries_;
- InstructionSummary* next_summary_;
+ inline bool IsGapAt(int index);
- ZoneList<InstructionSummary*> summary_stack_;
+ inline LInstruction* InstructionAt(int index);
+
+ inline LGap* GapAt(int index);
+
+ LChunk* chunk_;
// During liveness analysis keep a mapping from block id to live_in sets
// for blocks already analyzed.
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index e1b6fc0256..a2f9df0fdf 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -509,6 +509,82 @@ class LEnvironment: public ZoneObject {
friend class LCodegen;
};
+
+// Iterates over the non-null, non-constant operands in an environment.
+class ShallowIterator BASE_EMBEDDED {
+ public:
+ explicit ShallowIterator(LEnvironment* env)
+ : env_(env),
+ limit_(env != NULL ? env->values()->length() : 0),
+ current_(0) {
+ current_ = AdvanceToNext(0);
+ }
+
+ inline bool HasNext() {
+ return env_ != NULL && current_ < limit_;
+ }
+
+ inline LOperand* Next() {
+ ASSERT(HasNext());
+ return env_->values()->at(current_);
+ }
+
+ inline void Advance() {
+ current_ = AdvanceToNext(current_ + 1);
+ }
+
+ inline LEnvironment* env() { return env_; }
+
+ private:
+ inline int AdvanceToNext(int start) {
+ while (start < limit_ &&
+ (env_->values()->at(start) == NULL ||
+ env_->values()->at(start)->IsConstantOperand())) {
+ start++;
+ }
+ return start;
+ }
+
+ LEnvironment* env_;
+ int limit_;
+ int current_;
+};
+
+
+// Iterator for non-null, non-constant operands, including outer environments.
+class DeepIterator BASE_EMBEDDED {
+ public:
+ explicit DeepIterator(LEnvironment* env)
+ : current_iterator_(env) { }
+
+ inline bool HasNext() {
+ if (current_iterator_.HasNext()) return true;
+ if (current_iterator_.env() == NULL) return false;
+ AdvanceToOuter();
+ return current_iterator_.HasNext();
+ }
+
+ inline LOperand* Next() {
+ ASSERT(current_iterator_.HasNext());
+ return current_iterator_.Next();
+ }
+
+ inline void Advance() {
+ if (current_iterator_.HasNext()) {
+ current_iterator_.Advance();
+ } else {
+ AdvanceToOuter();
+ }
+ }
+
+ private:
+ inline void AdvanceToOuter() {
+ current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+ }
+
+ ShallowIterator current_iterator_;
+};
+
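DeepIterator flattens a chain of environments (an inlined function's frame state points at its caller's via outer()) into one operand stream, innermost environment first. A small stand-alone model of the traversal order, with environments reduced to vectors of ints standing in for LOperand*:

    #include <cstddef>
    #include <vector>

    struct Env {
      std::vector<int> values;
      Env* outer;
    };

    // Yields the values of env, then of env->outer, and so on --
    // the same order DeepIterator produces.
    std::vector<int> Flatten(Env* env) {
      std::vector<int> out;
      for (Env* e = env; e != NULL; e = e->outer) {
        out.insert(out.end(), e->values.begin(), e->values.end());
      }
      return out;
    }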
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 4c3d1e70e0..d22ac658d9 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -38,10 +38,6 @@ var COMPILATION_TYPE_HOST = 0;
var COMPILATION_TYPE_EVAL = 1;
var COMPILATION_TYPE_JSON = 2;
-// Lazily initialized.
-var kVowelSounds = 0;
-var kCapitalVowelSounds = 0;
-
// Matches Messages::kNoLineNumberInfo from v8.h
var kNoLineNumberInfo = 0;
@@ -52,8 +48,7 @@ var kAddMessageAccessorsMarker = { };
var kMessages = 0;
-var kReplacementMarkers =
- [ "%0", "%1", "%2", "%3" ]
+var kReplacementMarkers = [ "%0", "%1", "%2", "%3" ];
function FormatString(format, message) {
var args = %MessageGetArguments(message);
@@ -152,6 +147,7 @@ function FormatMessage(message) {
unexpected_token_number: ["Unexpected number"],
unexpected_token_string: ["Unexpected string"],
unexpected_token_identifier: ["Unexpected identifier"],
+ unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
unexpected_eos: ["Unexpected end of input"],
malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
unterminated_regexp: ["Invalid regular expression: missing /"],
@@ -226,6 +222,7 @@ function FormatMessage(message) {
strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_reserved_word: ["Use of future reserved word in strict mode"],
};
}
var message_type = %MessageGetType(message);
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 50f4031b79..3b83dd4869 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -2510,29 +2510,29 @@ void Code::set_stack_slots(unsigned slots) {
}
-unsigned Code::safepoint_table_start() {
+unsigned Code::safepoint_table_offset() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+ return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
}
-void Code::set_safepoint_table_start(unsigned offset) {
+void Code::set_safepoint_table_offset(unsigned offset) {
ASSERT(kind() == OPTIMIZED_FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+ WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
}
-unsigned Code::stack_check_table_start() {
+unsigned Code::stack_check_table_offset() {
ASSERT(kind() == FUNCTION);
- return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+ return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
}
-void Code::set_stack_check_table_start(unsigned offset) {
+void Code::set_stack_check_table_offset(unsigned offset) {
ASSERT(kind() == FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+ WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
}
@@ -2993,6 +2993,18 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
}
+bool SharedFunctionInfo::strict_mode() {
+ return BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+
+void SharedFunctionInfo::set_strict_mode(bool value) {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kStrictModeFunction,
+ value));
+}
+
+
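The new accessors pack the flag into the compiler_hints bitfield through BooleanBit, next to kOptimizationDisabled and friends; the bit index kStrictModeFunction == 8 is added in objects.h below. The helper reduces to ordinary bit twiddling, sketched here stand-alone:

    #include <cassert>

    // What BooleanBit::get/set amount to (the real helpers in objects.h
    // operate on the compiler_hints smi):
    bool GetBit(int hints, int bit) { return (hints & (1 << bit)) != 0; }
    int SetBit(int hints, int bit, bool value) {
      return value ? (hints | (1 << bit)) : (hints & ~(1 << bit));
    }

    int main() {
      const int kStrictModeFunction = 8;
      int hints = 0;
      hints = SetBit(hints, kStrictModeFunction, true);
      assert(GetBit(hints, kStrictModeFunction));
      return 0;
    }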
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 8bced586b3..775487a0a6 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1213,6 +1213,8 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
MaybeObject* JSObject::AddFastProperty(String* name,
Object* value,
PropertyAttributes attributes) {
+ ASSERT(!IsJSGlobalProxy());
+
// Normalize the object if the name is an actual string (not the
// hidden symbols) and is not a real identifier.
StringInputBuffer buffer(name);
@@ -2288,6 +2290,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
// The global object is always normalized.
ASSERT(!IsGlobalObject());
+ // JSGlobalProxy must never be normalized.
+ ASSERT(!IsJSGlobalProxy());
+
// Allocate new content.
int property_count = map()->NumberOfDescribedProperties();
if (expected_additional_properties > 0) {
@@ -5920,7 +5925,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+ Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
it.rinfo()->set_target_cell(*cell);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
@@ -6001,7 +6006,7 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
void Code::SetNoStackCheckTable() {
// Indicate the absence of a stack-check table by a table start after the
// end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+ set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
}
@@ -6278,7 +6283,7 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF(out, "\n");
} else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_start();
+ unsigned offset = stack_check_table_offset();
// If there is no stack check table, the "table start" will be at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
@@ -6679,6 +6684,13 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
return UNDEFINED_ELEMENT;
}
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return UNDEFINED_ELEMENT;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->HasLocalElement(index);
+ }
+
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
@@ -7986,20 +7998,28 @@ class StringKey : public HashTableKey {
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
- StringSharedKey(String* source, SharedFunctionInfo* shared)
- : source_(source), shared_(shared) { }
+ StringSharedKey(String* source,
+ SharedFunctionInfo* shared,
+ StrictModeFlag strict_mode)
+ : source_(source),
+ shared_(shared),
+ strict_mode_(strict_mode) { }
bool IsMatch(Object* other) {
if (!other->IsFixedArray()) return false;
FixedArray* pair = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
if (shared != shared_) return false;
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+ Smi::cast(pair->get(2))->value());
+ if (strict_mode != strict_mode_) return false;
String* source = String::cast(pair->get(1));
return source->Equals(source_);
}
static uint32_t StringSharedHashHelper(String* source,
- SharedFunctionInfo* shared) {
+ SharedFunctionInfo* shared,
+ StrictModeFlag strict_mode) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
// Instead of using the SharedFunctionInfo pointer in the hash
@@ -8009,36 +8029,41 @@ class StringSharedKey : public HashTableKey {
// collection.
Script* script = Script::cast(shared->script());
hash ^= String::cast(script->source())->Hash();
+ if (strict_mode == kStrictMode) hash ^= 0x8000;
hash += shared->start_position();
}
return hash;
}
uint32_t Hash() {
- return StringSharedHashHelper(source_, shared_);
+ return StringSharedHashHelper(source_, shared_, strict_mode_);
}
uint32_t HashForObject(Object* obj) {
FixedArray* pair = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
String* source = String::cast(pair->get(1));
- return StringSharedHashHelper(source, shared);
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+ Smi::cast(pair->get(2))->value());
+ return StringSharedHashHelper(source, shared, strict_mode);
}
MUST_USE_RESULT MaybeObject* AsObject() {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = Heap::AllocateFixedArray(3);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
pair->set(0, shared_);
pair->set(1, source_);
+ pair->set(2, Smi::FromInt(strict_mode_));
return pair;
}
private:
String* source_;
SharedFunctionInfo* shared_;
+ StrictModeFlag strict_mode_;
};
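With strict mode folded into both IsMatch and the hash, an eval of identical source from the same closure can no longer hit a cache entry compiled under the other mode; the 0x8000 XOR keeps the two keys in different buckets. The mixing, restated stand-alone (eliding the HasSourceCode() guard around the script hash):

    #include <cassert>

    unsigned EvalKeyHash(unsigned source_hash, unsigned script_hash,
                         int start_position, bool strict) {
      unsigned hash = source_hash;
      hash ^= script_hash;
      if (strict) hash ^= 0x8000;  // separate sloppy and strict entries
      hash += start_position;
      return hash;
    }

    int main() {
      // Same source, same position: strictness alone separates the keys.
      assert(EvalKeyHash(0xBEEF, 0xCAFE, 10, false) !=
             EvalKeyHash(0xBEEF, 0xCAFE, 10, true));
      return 0;
    }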
@@ -8997,8 +9022,10 @@ Object* CompilationCacheTable::Lookup(String* src) {
}
-Object* CompilationCacheTable::LookupEval(String* src, Context* context) {
- StringSharedKey key(src, context->closure()->shared());
+Object* CompilationCacheTable::LookupEval(String* src,
+ Context* context,
+ StrictModeFlag strict_mode) {
+ StringSharedKey key(src, context->closure()->shared(), strict_mode);
int entry = FindEntry(&key);
if (entry == kNotFound) return Heap::undefined_value();
return get(EntryToIndex(entry) + 1);
@@ -9033,8 +9060,10 @@ MaybeObject* CompilationCacheTable::Put(String* src, Object* value) {
MaybeObject* CompilationCacheTable::PutEval(String* src,
Context* context,
- Object* value) {
- StringSharedKey key(src, context->closure()->shared());
+ SharedFunctionInfo* value) {
+ StringSharedKey key(src,
+ context->closure()->shared(),
+ value->strict_mode() ? kStrictMode : kNonStrictMode);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index c9b3757df9..bf598307a2 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -3275,13 +3275,13 @@ class Code: public HeapObject {
// [safepoint_table_offset]: For kind OPTIMIZED_FUNCTION, the offset in
// the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_start();
- inline void set_safepoint_table_start(unsigned offset);
+ inline unsigned safepoint_table_offset();
+ inline void set_safepoint_table_offset(unsigned offset);
// [stack_check_table_offset]: For kind FUNCTION, the offset in the
// instruction stream where the stack check table starts.
- inline unsigned stack_check_table_start();
- inline void set_stack_check_table_start(unsigned offset);
+ inline unsigned stack_check_table_offset();
+ inline void set_stack_check_table_offset(unsigned offset);
// [check type]: For kind CALL_IC, tells how to check if the
// receiver is valid for the given call.
@@ -3445,8 +3445,8 @@ class Code: public HeapObject {
static const int kAllowOSRAtLoopNestingLevelOffset =
kHasDeoptimizationSupportOffset + 1;
- static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
- static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
+ static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
+ static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
// Flags layout.
static const int kFlagsICStateShift = 0;
@@ -4176,6 +4176,10 @@ class SharedFunctionInfo: public HeapObject {
inline bool optimization_disabled();
inline void set_optimization_disabled(bool value);
+ // Indicates whether the function is a strict mode function.
+ inline bool strict_mode();
+ inline void set_strict_mode(bool value);
+
// Indicates whether or not the code in the shared function support
// deoptimization.
inline bool has_deoptimization_support();
@@ -4357,6 +4361,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kCodeAgeShift = 4;
static const int kCodeAgeMask = 0x7;
static const int kOptimizationDisabled = 7;
+ static const int kStrictModeFunction = 8;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -4902,10 +4907,12 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
public:
// Find cached value for a string key, otherwise return null.
Object* Lookup(String* src);
- Object* LookupEval(String* src, Context* context);
+ Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
MaybeObject* Put(String* src, Object* value);
- MaybeObject* PutEval(String* src, Context* context, Object* value);
+ MaybeObject* PutEval(String* src,
+ Context* context,
+ SharedFunctionInfo* value);
MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
// Remove given value from cache.
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index ccb3f64e10..5353a63460 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -616,7 +616,8 @@ Parser::Parser(Handle<Script> script,
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
- bool in_global_context) {
+ bool in_global_context,
+ StrictModeFlag strict_mode) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse);
@@ -632,17 +633,18 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, &zone_scope);
+ return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, &zone_scope);
+ return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
}
}
FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
bool in_global_context,
+ StrictModeFlag strict_mode,
ZoneScope* zone_scope) {
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
@@ -662,6 +664,9 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
scope);
TemporaryScope temp_scope(&this->temp_scope_);
+ if (strict_mode == kStrictMode) {
+ temp_scope.EnableStrictMode();
+ }
ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
@@ -747,10 +752,16 @@ FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
scope);
TemporaryScope temp_scope(&this->temp_scope_);
+ if (info->strict_mode()) {
+ temp_scope.EnableStrictMode();
+ }
+
FunctionLiteralType type =
info->is_expression() ? EXPRESSION : DECLARATION;
bool ok = true;
- result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
+ result = ParseFunctionLiteral(name,
+ false, // Strict mode name already checked.
+ RelocInfo::kNoPosition, type, &ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
// The only errors should be stack overflows.
@@ -1439,8 +1450,10 @@ Statement* Parser::ParseFunctionDeclaration(bool* ok) {
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ bool is_reserved = false;
+ Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
+ is_reserved,
function_token_position,
DECLARATION,
CHECK_OK);
@@ -1699,7 +1712,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
- bool starts_with_idenfifier = (peek() == Token::IDENTIFIER);
+ bool starts_with_idenfifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
if (peek() == Token::COLON && starts_with_idenfifier && expr &&
expr->AsVariableProxy() != NULL &&
@@ -2688,9 +2701,12 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
Handle<String> name;
- if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
- result = ParseFunctionLiteral(name, function_token_position,
- NESTED, CHECK_OK);
+ bool is_reserved_name = false;
+ if (peek_any_identifier()) {
+ name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+ }
+ result = ParseFunctionLiteral(name, is_reserved_name,
+ function_token_position, NESTED, CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
@@ -2759,6 +2775,11 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
case Token::IDENTIFIER:
return ReportMessage("unexpected_token_identifier",
Vector<const char*>::empty());
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessage(temp_scope_->StrictMode() ?
+ "unexpected_strict_reserved" :
+ "unexpected_token_identifier",
+ Vector<const char*>::empty());
default:
const char* name = Token::String(token);
ASSERT(name != NULL);
@@ -2814,7 +2835,8 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
result = new Literal(Factory::false_value());
break;
- case Token::IDENTIFIER: {
+ case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD: {
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
result = top_scope_->NewUnresolved(name, inside_with());
@@ -3221,6 +3243,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
Token::Value next = Next();
bool is_keyword = Token::IsKeyword(next);
if (next == Token::IDENTIFIER || next == Token::NUMBER ||
+ next == Token::FUTURE_RESERVED_WORD ||
next == Token::STRING || is_keyword) {
Handle<String> name;
if (is_keyword) {
@@ -3230,6 +3253,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
}
FunctionLiteral* value =
ParseFunctionLiteral(name,
+ false, // Reserved words are allowed here.
RelocInfo::kNoPosition,
DECLARATION,
CHECK_OK);
@@ -3272,6 +3296,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Scanner::Location loc = scanner().peek_location();
switch (next) {
+ case Token::FUTURE_RESERVED_WORD:
case Token::IDENTIFIER: {
bool is_getter = false;
bool is_setter = false;
@@ -3420,6 +3445,7 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
+ bool name_is_reserved,
int function_token_position,
FunctionLiteralType type,
bool* ok) {
@@ -3453,10 +3479,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
int start_pos = scanner().location().beg_pos;
Scanner::Location name_loc = Scanner::NoLocation();
Scanner::Location dupe_loc = Scanner::NoLocation();
+ Scanner::Location reserved_loc = Scanner::NoLocation();
bool done = (peek() == Token::RPAREN);
while (!done) {
- Handle<String> param_name = ParseIdentifier(CHECK_OK);
+ bool is_reserved = false;
+ Handle<String> param_name =
+ ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
// Store locations for possible future error reports.
if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
@@ -3465,6 +3494,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
dupe_loc = scanner().location();
}
+ if (!reserved_loc.IsValid() && is_reserved) {
+ reserved_loc = scanner().location();
+ }
Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
top_scope_->AddParameter(parameter);
@@ -3545,7 +3577,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
- ReportMessageAt(Scanner::Location(position, start_pos),
+ Scanner::Location location = Scanner::Location(position, start_pos);
+ ReportMessageAt(location,
"strict_function_name", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -3562,6 +3595,22 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
*ok = false;
return NULL;
}
+ if (name_is_reserved) {
+ int position = function_token_position != RelocInfo::kNoPosition
+ ? function_token_position
+ : (start_pos > 0 ? start_pos - 1 : start_pos);
+ Scanner::Location location = Scanner::Location(position, start_pos);
+ ReportMessageAt(location, "strict_reserved_word",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ if (reserved_loc.IsValid()) {
+ ReportMessageAt(reserved_loc, "strict_reserved_word",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
}
@@ -3633,6 +3682,13 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
+bool Parser::peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD;
+}
+
+
void Parser::Consume(Token::Value token) {
Token::Value next = Next();
USE(next);
@@ -3692,7 +3748,22 @@ Literal* Parser::GetLiteralNumber(double value) {
Handle<String> Parser::ParseIdentifier(bool* ok) {
- Expect(Token::IDENTIFIER, ok);
+ bool is_reserved;
+ return ParseIdentifierOrReservedWord(&is_reserved, ok);
+}
+
+
+Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
+ bool* ok) {
+ *is_reserved = false;
+ if (temp_scope_->StrictMode()) {
+ Expect(Token::IDENTIFIER, ok);
+ } else {
+ if (!Check(Token::IDENTIFIER)) {
+ Expect(Token::FUTURE_RESERVED_WORD, ok);
+ *is_reserved = true;
+ }
+ }
if (!*ok) return Handle<String>();
return GetSymbol(ok);
}
@@ -3700,7 +3771,9 @@ Handle<String> Parser::ParseIdentifier(bool* ok) {
Handle<String> Parser::ParseIdentifierName(bool* ok) {
Token::Value next = Next();
- if (next != Token::IDENTIFIER && !Token::IsKeyword(next)) {
+ if (next != Token::IDENTIFIER &&
+ next != Token::FUTURE_RESERVED_WORD &&
+ !Token::IsKeyword(next)) {
ReportUnexpectedToken(next);
*ok = false;
return Handle<String>();
@@ -3740,20 +3813,18 @@ void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'. The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
bool* is_set,
bool* ok) {
- Expect(Token::IDENTIFIER, ok);
+ Handle<String> result = ParseIdentifier(ok);
if (!*ok) return Handle<String>();
if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
const char* token = scanner().literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
- return GetSymbol(ok);
+ return result;
}
@@ -3895,6 +3966,7 @@ Handle<Object> JsonParser::ParseJson(Handle<String> script,
message = "unexpected_token_string";
break;
case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD:
message = "unexpected_token_identifier";
break;
default:
@@ -3941,16 +4013,10 @@ Handle<String> JsonParser::GetString() {
Handle<Object> JsonParser::ParseJsonValue() {
Token::Value token = scanner_.Next();
switch (token) {
- case Token::STRING: {
+ case Token::STRING:
return GetString();
- }
- case Token::NUMBER: {
- ASSERT(scanner_.is_literal_ascii());
- double value = StringToDouble(scanner_.literal_ascii_string(),
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
- return Factory::NewNumber(value);
- }
+ case Token::NUMBER:
+ return Factory::NewNumber(scanner_.number());
case Token::FALSE_LITERAL:
return Factory::false_value();
case Token::TRUE_LITERAL:
@@ -5024,7 +5090,9 @@ bool ParserApi::Parse(CompilationInfo* info) {
ASSERT(Top::has_pending_exception());
} else {
Handle<String> source = Handle<String>(String::cast(script->source()));
- result = parser.ParseProgram(source, info->is_global());
+ result = parser.ParseProgram(source,
+ info->is_global(),
+ info->StrictMode());
}
}
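Taken together, ParseIdentifierOrReservedWord and the name_is_reserved /
reserved_loc bookkeeping give the following behavior; a sketch in plain
JavaScript (ES5 strict-mode rules assumed):

  // In sloppy code, FUTURE_RESERVED_WORD tokens are ordinary identifiers:
  var yield = 1;           // accepted; is_reserved is recorded and ignored
  function static() {}     // accepted; name_is_reserved stays unused

  // In strict code, the recorded locations become errors:
  //   "use strict"; function static() {}    // SyntaxError: strict_reserved_word
  //   "use strict"; function f(private) {}  // SyntaxError via reserved_loc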
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 68983b42cc..aa8d525cd1 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -423,7 +423,8 @@ class Parser {
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Handle<String> source,
- bool in_global_context);
+ bool in_global_context,
+ StrictModeFlag strict_mode);
FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
@@ -446,6 +447,7 @@ class Parser {
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Handle<String> source,
bool in_global_context,
+ StrictModeFlag strict_mode,
ZoneScope* zone_scope);
// Report syntax error
@@ -546,6 +548,7 @@ class Parser {
ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
+ bool name_is_reserved,
int function_token_position,
FunctionLiteralType type,
bool* ok);
@@ -575,6 +578,8 @@ class Parser {
return scanner().Next();
}
+ bool peek_any_identifier();
+
INLINE(void Consume(Token::Value token));
void Expect(Token::Value token, bool* ok);
bool Check(Token::Value token);
@@ -608,6 +613,7 @@ class Parser {
Literal* GetLiteralNumber(double value);
Handle<String> ParseIdentifier(bool* ok);
+ Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
Handle<String> ParseIdentifierName(bool* ok);
Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
bool* is_set,
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index c0dcc0b4a1..252e88f465 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -83,6 +83,7 @@ void PreParser::ReportUnexpectedToken(i::Token::Value token) {
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_string", NULL);
case i::Token::IDENTIFIER:
+ case i::Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_identifier", NULL);
default:
@@ -790,7 +791,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
Expression result = kUnknownExpression;
if (peek() == i::Token::FUNCTION) {
Consume(i::Token::FUNCTION);
- if (peek() == i::Token::IDENTIFIER) {
+ if (peek_any_identifier()) {
ParseIdentifier(CHECK_OK);
}
result = ParseFunctionLiteral(CHECK_OK);
@@ -858,7 +859,8 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
break;
}
- case i::Token::IDENTIFIER: {
+ case i::Token::IDENTIFIER:
+ case i::Token::FUTURE_RESERVED_WORD: {
ParseIdentifier(CHECK_OK);
result = kIdentifierExpression;
break;
@@ -946,7 +948,8 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
while (peek() != i::Token::RBRACE) {
i::Token::Value next = peek();
switch (next) {
- case i::Token::IDENTIFIER: {
+ case i::Token::IDENTIFIER:
+ case i::Token::FUTURE_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
@@ -954,6 +957,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
i::Token::Value name = Next();
bool is_keyword = i::Token::IsKeyword(name);
if (name != i::Token::IDENTIFIER &&
+ name != i::Token::FUTURE_RESERVED_WORD &&
name != i::Token::NUMBER &&
name != i::Token::STRING &&
!is_keyword) {
@@ -1151,7 +1155,9 @@ PreParser::Expression PreParser::GetStringSymbol() {
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- Expect(i::Token::IDENTIFIER, ok);
+ if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
+ Expect(i::Token::IDENTIFIER, ok);
+ }
if (!*ok) return kUnknownIdentifier;
return GetIdentifierSymbol();
}
@@ -1166,7 +1172,8 @@ PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
i::StrLength(keyword)));
return kUnknownExpression;
}
- if (next == i::Token::IDENTIFIER) {
+ if (next == i::Token::IDENTIFIER ||
+ next == i::Token::FUTURE_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
@@ -1175,19 +1182,23 @@ PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'. The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
bool* is_set,
bool* ok) {
- Expect(i::Token::IDENTIFIER, CHECK_OK);
+ PreParser::Identifier result = ParseIdentifier(CHECK_OK);
if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
const char* token = scanner_->literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
- return GetIdentifierSymbol();
+ return result;
+}
+
+bool PreParser::peek_any_identifier() {
+ i::Token::Value next = peek();
+ return next == i::Token::IDENTIFIER ||
+ next == i::Token::FUTURE_RESERVED_WORD;
}
#undef CHECK_OK
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 66fad3bcbf..b7fa6c73bd 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -243,6 +243,8 @@ class PreParser {
return scanner_->Next();
}
+ bool peek_any_identifier();
+
void Consume(i::Token::Value token) { Next(); }
void Expect(i::Token::Value token, bool* ok) {
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 211f3f6360..dda7abbb3f 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -297,13 +297,13 @@ void PrettyPrinter::VisitSlot(Slot* node) {
Print("parameter[%d]", node->index());
break;
case Slot::LOCAL:
- Print("frame[%d]", node->index());
+ Print("local[%d]", node->index());
break;
case Slot::CONTEXT:
- Print(".context[%d]", node->index());
+ Print("context[%d]", node->index());
break;
case Slot::LOOKUP:
- Print(".context[");
+ Print("lookup[");
PrintLiteral(node->var()->name(), false);
Print("]");
break;
@@ -999,24 +999,7 @@ void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
void AstPrinter::VisitSlot(Slot* node) {
PrintIndented("SLOT ");
- switch (node->type()) {
- case Slot::PARAMETER:
- Print("parameter[%d]", node->index());
- break;
- case Slot::LOCAL:
- Print("frame[%d]", node->index());
- break;
- case Slot::CONTEXT:
- Print(".context[%d]", node->index());
- break;
- case Slot::LOOKUP:
- Print(".context[");
- PrintLiteral(node->var()->name(), false);
- Print("]");
- break;
- default:
- UNREACHABLE();
- }
+ PrettyPrinter::VisitSlot(node);
Print("\n");
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index d55a201e59..4994378cd3 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -644,6 +644,90 @@ static void GetOwnPropertyImplementation(JSObject* obj,
}
+static bool CheckAccessException(LookupResult* result,
+ v8::AccessType access_type) {
+ if (result->type() == CALLBACKS) {
+ Object* callback = result->GetCallbackObject();
+ if (callback->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(callback);
+ bool can_access =
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ return can_access;
+ }
+ }
+
+ return false;
+}
+
+
+static bool CheckAccess(JSObject* obj,
+ String* name,
+ LookupResult* result,
+ v8::AccessType access_type) {
+ ASSERT(result->IsProperty());
+
+ JSObject* holder = result->holder();
+ JSObject* current = obj;
+ while (true) {
+ if (current->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(current, name, access_type)) {
+ // Access check callback denied the access, but some properties
+ // can have special permissions which override the callback's decision
+ // (currently, see v8::AccessControl).
+ break;
+ }
+
+ if (current == holder) {
+ return true;
+ }
+
+ current = JSObject::cast(current->GetPrototype());
+ }
+
+ // API callbacks can have per callback access exceptions.
+ switch (result->type()) {
+ case CALLBACKS: {
+ if (CheckAccessException(result, access_type)) {
+ return true;
+ }
+ break;
+ }
+ case INTERCEPTOR: {
+ // If the object has an interceptor, try real named properties.
+ // Overwrite the result to fetch the correct property later.
+ holder->LookupRealNamedProperty(name, result);
+ if (result->IsProperty()) {
+ if (CheckAccessException(result, access_type)) {
+ return true;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ Top::ReportFailedAccessCheck(current, access_type);
+ return false;
+}
+
+
+// TODO(1095): we should traverse the hidden prototype hierarchy as well.
+static bool CheckElementAccess(JSObject* obj,
+ uint32_t index,
+ v8::AccessType access_type) {
+ if (obj->IsAccessCheckNeeded() &&
+ !Top::MayIndexedAccess(obj, index, access_type)) {
+ return false;
+ }
+
+ return true;
+}
+
+
// Enumerator used as indices into the array returned from GetOwnProperty
enum PropertyDescriptorIndices {
IS_ACCESSOR_INDEX,
@@ -686,7 +770,7 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
// subsequent cases.
Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
Handle<String> str(String::cast(js_value->value()));
- Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
+ Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, *substr);
@@ -699,8 +783,7 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
case JSObject::INTERCEPTED_ELEMENT:
case JSObject::FAST_ELEMENT: {
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- Handle<Object> element = GetElement(Handle<Object>(obj), index);
- elms->set(VALUE_INDEX, *element);
+ elms->set(VALUE_INDEX, *GetElement(obj, index));
elms->set(WRITABLE_INDEX, Heap::true_value());
elms->set(ENUMERABLE_INDEX, Heap::true_value());
elms->set(CONFIGURABLE_INDEX, Heap::true_value());
@@ -708,7 +791,14 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
}
case JSObject::DICTIONARY_ELEMENT: {
- NumberDictionary* dictionary = obj->element_dictionary();
+ Handle<JSObject> holder = obj;
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return Heap::undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ holder = Handle<JSObject>(JSObject::cast(proto));
+ }
+ NumberDictionary* dictionary = holder->element_dictionary();
int entry = dictionary->FindEntry(index);
ASSERT(entry != NumberDictionary::kNotFound);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -718,14 +808,18 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
FixedArray* callbacks =
FixedArray::cast(dictionary->ValueAt(entry));
elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
- elms->set(GETTER_INDEX, callbacks->get(0));
- elms->set(SETTER_INDEX, callbacks->get(1));
+ if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
+ elms->set(GETTER_INDEX, callbacks->get(0));
+ }
+ if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
+ elms->set(SETTER_INDEX, callbacks->get(1));
+ }
break;
}
case NORMAL:
// This is a data property.
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+ elms->set(VALUE_INDEX, *GetElement(obj, index));
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
break;
default:
@@ -746,6 +840,10 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
return Heap::undefined_value();
}
+ if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
+ return Heap::false_value();
+ }
+
elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
@@ -754,16 +852,22 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
if (is_js_accessor) {
// __defineGetter__/__defineSetter__ callback.
- FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
- elms->set(GETTER_INDEX, structure->get(0));
- elms->set(SETTER_INDEX, structure->get(1));
+
+ FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
+ if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+ elms->set(GETTER_INDEX, structure->get(0));
+ }
+ if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+ elms->set(SETTER_INDEX, structure->get(1));
+ }
} else {
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
PropertyAttributes attrs;
Object* value;
+ // GetProperty will check access and report any violations.
{ MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
if (!maybe_value->ToObject(&value)) return maybe_value;
}
@@ -3487,8 +3591,10 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
HandleScope scope;
Handle<String> str = args.at<String>(0);
int index = Smi::cast(args[1])->value();
- Handle<Object> result = GetCharAt(str, index);
- return *result;
+ if (index >= 0 && index < str->length()) {
+ Handle<Object> result = GetCharAt(str, index);
+ return *result;
+ }
}
// Fall back to GetObjectProperty.
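The bounds check changes what an out-of-range string index observes: rather
than calling GetCharAt unconditionally, the runtime now falls through to the
generic lookup, which consults the prototype chain. For example (plain
JavaScript):

  var s = "ab";
  s[1];                      // "b" -- in-range, GetCharAt fast path
  s[5];                      // undefined -- out of range, generic path

  String.prototype[5] = "x";
  s[5];                      // "x" -- the generic path sees inherited elements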
@@ -3496,7 +3602,12 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
args.at<Object>(1));
}
-
+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4b - define a new accessor property.
+// Steps 9c & 12 - replace an existing data property with an accessor property.
+// Step 12 - update an existing accessor property with an accessor or generic
+// descriptor.
static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
ASSERT(args.length() == 5);
HandleScope scope;
@@ -3528,6 +3639,12 @@ static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
}
+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4a - define a new data property.
+// Steps 9b & 12 - replace an existing accessor property with a data property.
+// Step 12 - update an existing data property with a data or generic
+// descriptor.
static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
ASSERT(args.length() == 4);
HandleScope scope;
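The cases listed in the two comments correspond to Object.defineProperty
calls such as these (illustrative JavaScript, ES5 semantics assumed):

  var o = {};

  // Data path, step 4a: define a new data property.
  Object.defineProperty(o, "a", { value: 1, configurable: true });

  // Accessor path, steps 9c & 12: replace that data property with an accessor.
  Object.defineProperty(o, "a", { get: function() { return 2; } });

  // Data path, steps 9b & 12: and back again, accessor to data.
  Object.defineProperty(o, "a", { value: 3 });

  // Data path, step 12: update with a generic descriptor (flags only).
  Object.defineProperty(o, "a", { enumerable: false });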
@@ -3551,7 +3668,9 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
is_element) {
// Normalize the elements to enable attributes on the property.
- NormalizeElements(js_object);
+ if (!js_object->IsJSGlobalProxy()) {
+ NormalizeElements(js_object);
+ }
Handle<NumberDictionary> dictionary(js_object->element_dictionary());
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
@@ -3571,7 +3690,9 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
if (result.IsProperty() &&
(attr != result.GetAttributes() || result.type() == CALLBACKS)) {
// New attributes - normalize to avoid writing to instance descriptor
- NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!js_object->IsJSGlobalProxy()) {
+ NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+ }
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
return js_object->SetLocalPropertyIgnoreAttributes(*name,
@@ -4167,7 +4288,7 @@ static MaybeObject* Runtime_ToSlowProperties(Arguments args) {
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
+ if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
}
@@ -6889,7 +7010,7 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
// the AST id matching the PC.
Address start = unoptimized->instruction_start();
unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_start();
+ Address table_cursor = start + unoptimized->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(table_cursor);
table_cursor += kIntSize;
for (unsigned i = 0; i < table_length; ++i) {
@@ -7553,7 +7674,8 @@ static MaybeObject* Runtime_CompileString(Arguments args) {
Handle<Context> context(Top::context()->global_context());
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
context,
- true);
+ true,
+ kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
@@ -7562,13 +7684,15 @@ static MaybeObject* Runtime_CompileString(Arguments args) {
static ObjectPair CompileGlobalEval(Handle<String> source,
- Handle<Object> receiver) {
+ Handle<Object> receiver,
+ StrictModeFlag mode) {
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
Handle<Context>(Top::context()),
- Top::context()->IsGlobalContext());
+ Top::context()->IsGlobalContext(),
+ mode);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
shared,
@@ -7579,7 +7703,7 @@ static ObjectPair CompileGlobalEval(Handle<String> source,
static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
if (!args[0]->IsJSFunction()) {
return MakePair(Top::ThrowIllegalOperation(), NULL);
}
@@ -7643,12 +7767,16 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
return MakePair(*callee, Top::context()->global()->global_receiver());
}
- return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+ ASSERT(args[3]->IsSmi());
+ return CompileGlobalEval(args.at<String>(1),
+ args.at<Object>(2),
+ static_cast<StrictModeFlag>(
+ Smi::cast(args[3])->value()));
}
static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
if (!args[0]->IsJSFunction()) {
return MakePair(Top::ThrowIllegalOperation(), NULL);
}
@@ -7663,7 +7791,11 @@ static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
return MakePair(*callee, Top::context()->global()->global_receiver());
}
- return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+ ASSERT(args[3]->IsSmi());
+ return CompileGlobalEval(args.at<String>(1),
+ args.at<Object>(2),
+ static_cast<StrictModeFlag>(
+ Smi::cast(args[3])->value()));
}
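The extra Smi argument threads the caller's language mode into
CompileGlobalEval, so a direct eval in strict code is itself compiled as
strict code. One observable consequence, tying in with the CheckOctalLiteral
call in the parser (plain JavaScript, ES5 semantics assumed):

  function sloppy() { return eval("010"); }
  function strict() { "use strict"; return eval("010"); }

  sloppy();  // 8: legacy octal literals are allowed in non-strict eval code
  strict();  // SyntaxError: strict mode code may not contain octal literals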
@@ -9800,10 +9932,14 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
source_str_length));
+
+ // Currently, the eval code is executed in non-strict mode,
+ // even when the surrounding code is strict.
Handle<SharedFunctionInfo> shared =
Compiler::CompileEval(function_source,
context,
- context->IsGlobalContext());
+ context->IsGlobalContext(),
+ kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Factory::NewFunctionFromSharedFunctionInfo(shared, context);
@@ -9885,10 +10021,10 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
}
// Compile the source to be evaluated.
+ // Currently, the eval code is executed in non-strict mode,
+ // even when the surrounding code is strict.
Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source,
- context,
- is_global);
+ Compiler::CompileEval(source, context, is_global, kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index b201eb8614..fb2ff93c6f 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -237,8 +237,8 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 3, 2) \
- F(ResolvePossiblyDirectEvalNoLookup, 3, 2) \
+ F(ResolvePossiblyDirectEval, 4, 2) \
+ F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 34e9cf4aba..d2ec54c388 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -58,7 +58,7 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
SafepointTable::SafepointTable(Code* code) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
code_ = code;
- Address header = code->instruction_start() + code->safepoint_table_start();
+ Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
@@ -230,4 +230,24 @@ uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
}
+int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
+ int result = 0;
+ if (!deoptimization_info_.is_empty()) {
+ unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
+ for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
+ DeoptimizationInfo info = deoptimization_info_[i];
+ if (static_cast<int>(info.deoptimization_index) !=
+ Safepoint::kNoDeoptimizationIndex) {
+ if (previous_gap_end + limit > info.pc) {
+ result++;
+ }
+ previous_gap_end = info.pc_after_gap;
+ }
+ }
+ }
+ return result;
+}
+
+
+
} } // namespace v8::internal
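For readers tracing CountShortDeoptimizationIntervals, a sketch of the same
logic in JavaScript (hypothetical entry objects mirroring DeoptimizationInfo;
kNoDeoptIndex stands in for Safepoint::kNoDeoptimizationIndex):

  function countShortDeoptimizationIntervals(infos, limit, kNoDeoptIndex) {
    var result = 0;
    if (infos.length === 0) return result;
    var previousGapEnd = infos[0].pcAfterGap;
    for (var i = 1; i < infos.length; i++) {
      var info = infos[i];
      if (info.deoptIndex !== kNoDeoptIndex) {
        // A "short" interval: the next point begins before the previous
        // gap end plus the limit.
        if (previousGapEnd + limit > info.pc) result++;
        previousGapEnd = info.pcAfterGap;
      }
    }
    return result;
  }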
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index eeeae37fbe..8803d06f5b 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -220,8 +220,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
int arguments,
int deoptimization_index);
- // Update the last safepoint with the size of the code generated for the gap
- // following it.
+ // Update the last safepoint with the size of the code generated until the
+ // end of the gap following it.
void SetPcAfterGap(int pc) {
ASSERT(!deoptimization_info_.is_empty());
int index = deoptimization_info_.length() - 1;
@@ -232,6 +232,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
// entry must be enough to hold all the pointer indexes.
void Emit(Assembler* assembler, int bits_per_entry);
+ // Count the number of deoptimization points where the following
+ // deoptimization point comes less than limit bytes after the end
+ // of this point's gap.
+ int CountShortDeoptimizationIntervals(unsigned limit);
+
private:
struct DeoptimizationInfo {
unsigned pc;
@@ -247,8 +252,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
ZoneList<ZoneList<int>*> indexes_;
ZoneList<ZoneList<int>*> registers_;
- bool emitted_;
unsigned offset_;
+ bool emitted_;
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
diff --git a/deps/v8/src/scanner-base.cc b/deps/v8/src/scanner-base.cc
index fe33f38e72..80bca4e282 100644
--- a/deps/v8/src/scanner-base.cc
+++ b/deps/v8/src/scanner-base.cc
@@ -796,25 +796,27 @@ KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
{ "break", KEYWORD_PREFIX, Token::BREAK },
{ NULL, C, Token::ILLEGAL },
{ NULL, D, Token::ILLEGAL },
- { "else", KEYWORD_PREFIX, Token::ELSE },
+ { NULL, E, Token::ILLEGAL },
{ NULL, F, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, I, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { "let", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, N, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, P, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ "return", KEYWORD_PREFIX, Token::RETURN },
- { "switch", KEYWORD_PREFIX, Token::SWITCH },
+ { NULL, S, Token::ILLEGAL },
{ NULL, T, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, V, Token::ILLEGAL },
- { NULL, W, Token::ILLEGAL }
+ { NULL, W, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { "yield", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
};
@@ -822,7 +824,7 @@ void KeywordMatcher::Step(unibrow::uchar input) {
switch (state_) {
case INITIAL: {
// Matching the first character is the only state with significant fanout.
- // Match only lower-case letters in range 'b'..'w'.
+ // Match only lower-case letters in range 'b'..'y'.
unsigned int offset = input - kFirstCharRangeMin;
if (offset < kFirstCharRangeLength) {
state_ = first_states_[offset].state;
@@ -850,6 +852,8 @@ void KeywordMatcher::Step(unibrow::uchar input) {
break;
case C:
if (MatchState(input, 'a', CA)) return;
+ if (MatchKeywordStart(input, "class", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchState(input, 'o', CO)) return;
break;
case CA:
@@ -872,6 +876,18 @@ void KeywordMatcher::Step(unibrow::uchar input) {
if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
break;
+ case E:
+ if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
+ if (MatchKeywordStart(input, "enum", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchState(input, 'x', EX)) return;
+ break;
+ case EX:
+ if (MatchKeywordStart(input, "export", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "extends", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
case F:
if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
@@ -880,10 +896,22 @@ void KeywordMatcher::Step(unibrow::uchar input) {
break;
case I:
if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
+ if (MatchState(input, 'm', IM)) return;
if (MatchKeyword(input, 'n', IN, Token::IN)) return;
break;
+ case IM:
+ if (MatchState(input, 'p', IMP)) return;
+ break;
+ case IMP:
+ if (MatchKeywordStart(input, "implements", 3,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "import", 3,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
case IN:
token_ = Token::IDENTIFIER;
+ if (MatchKeywordStart(input, "interface", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
break;
case N:
@@ -891,6 +919,27 @@ void KeywordMatcher::Step(unibrow::uchar input) {
if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
break;
+ case P:
+ if (MatchKeywordStart(input, "package", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchState(input, 'r', PR)) return;
+ if (MatchKeywordStart(input, "public", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
+ case PR:
+ if (MatchKeywordStart(input, "private", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "protected", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
+ case S:
+ if (MatchKeywordStart(input, "static", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "super", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "switch", 1,
+ Token::SWITCH)) return;
+ break;
case T:
if (MatchState(input, 'h', TH)) return;
if (MatchState(input, 'r', TR)) return;
diff --git a/deps/v8/src/scanner-base.h b/deps/v8/src/scanner-base.h
index 7ac1d358e1..f5fe7f7cef 100644
--- a/deps/v8/src/scanner-base.h
+++ b/deps/v8/src/scanner-base.h
@@ -564,10 +564,17 @@ class KeywordMatcher {
CON,
D,
DE,
+ E,
+ EX,
F,
I,
+ IM,
+ IMP,
IN,
N,
+ P,
+ PR,
+ S,
T,
TH,
TR,
@@ -583,7 +590,7 @@ class KeywordMatcher {
// Range of possible first characters of a keyword.
static const unsigned int kFirstCharRangeMin = 'b';
- static const unsigned int kFirstCharRangeMax = 'w';
+ static const unsigned int kFirstCharRangeMax = 'y';
static const unsigned int kFirstCharRangeLength =
kFirstCharRangeMax - kFirstCharRangeMin + 1;
// State map for first keyword character range.
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index b66d10b988..cab8c58bd8 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -516,17 +516,30 @@ Token::Value JsonScanner::ScanJsonString() {
Token::Value JsonScanner::ScanJsonNumber() {
LiteralScope literal(this);
- if (c0_ == '-') AddLiteralCharAdvance();
+ bool negative = false;
+
+ if (c0_ == '-') {
+ AddLiteralCharAdvance();
+ negative = true;
+ }
if (c0_ == '0') {
AddLiteralCharAdvance();
// Prefix zero is only allowed if it's the only digit before
// a decimal point or exponent.
if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
} else {
+ int i = 0;
+ int digits = 0;
if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
do {
+ i = i * 10 + c0_ - '0';
+ digits++;
AddLiteralCharAdvance();
} while (c0_ >= '0' && c0_ <= '9');
+ if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+ number_ = (negative ? -i : i);
+ return Token::NUMBER;
+ }
}
if (c0_ == '.') {
AddLiteralCharAdvance();
@@ -544,6 +557,10 @@ Token::Value JsonScanner::ScanJsonNumber() {
} while (c0_ >= '0' && c0_ <= '9');
}
literal.Complete();
+ ASSERT_NOT_NULL(next_.literal_chars);
+ number_ = StringToDouble(next_.literal_chars->ascii_literal(),
+ NO_FLAGS, // Hex, octal or trailing junk.
+ OS::nan_value());
return Token::NUMBER;
}
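The fast path above returns small integers without a StringToDouble round
trip: it applies only when the literal has no fraction, no exponent, and
fewer than ten digits, so the accumulated value fits in an int. Which inputs
take which path (illustrative JavaScript; the results are identical either
way):

  JSON.parse("123456789");   // 9 digits, no '.', 'e' or 'E': integer fast path
  JSON.parse("-42");         // negative small integers are covered too
  JSON.parse("1234567890");  // 10 digits: falls back to StringToDouble
  JSON.parse("1.5");         // fraction present: StringToDouble
  JSON.parse("12e3");        // exponent present: StringToDouble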
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index d7621825a9..cf2084f55e 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -148,6 +148,12 @@ class JsonScanner : public Scanner {
// Returns the next token.
Token::Value Next();
+ // Returns the value of a number token.
+ double number() {
+ return number_;
+ }
+
+
protected:
// Skip past JSON whitespace (only space, tab, newline and carriage-return).
bool SkipJsonWhiteSpace();
@@ -178,6 +184,9 @@ class JsonScanner : public Scanner {
// are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral).
Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
+ // Holds the value of a scanned number token.
+ double number_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index d3f54ad3f2..fd573b03b8 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -383,8 +383,7 @@ void Scope::AddDeclaration(Declaration* declaration) {
void Scope::SetIllegalRedeclaration(Expression* expression) {
- // Only set the illegal redeclaration expression the
- // first time the function is called.
+ // Record only the first illegal redeclaration.
if (!HasIllegalRedeclaration()) {
illegal_redecl_ = expression;
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 15f128da4c..731714138b 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -441,6 +441,12 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
+ // Using NORMAL as the PropertyType for array element loads is a misuse. The
+ // generated stub always accesses fast elements, not slow-mode fields, but
+ // some property type is required for the stub lookup. Note that overloading
+ // the NORMAL PropertyType is only safe as long as no stubs are generated for
+ // other keyed field loads. This is guaranteed to be the case since all field
+ // keyed loads that are not array elements go through a generic builtin stub.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
String* name = Heap::KeyedLoadSpecialized_symbol();
@@ -461,6 +467,33 @@ MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
}
+MaybeObject* StubCache::ComputeKeyedLoadPixelArray(JSObject* receiver) {
+ // Using NORMAL as the PropertyType for array element loads is a misuse. The
+ // generated stub always accesses fast elements, not slow-mode fields, but
+ // some property type is required for the stub lookup. Note that overloading
+ // the NORMAL PropertyType is only safe as long as no stubs are generated for
+ // other keyed field loads. This is guaranteed to be the case since all field
+ // keyed loads that are not array elements go through a generic builtin stub.
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
+ String* name = Heap::KeyedLoadPixelArray_symbol();
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ KeyedLoadStubCompiler compiler;
+ { MaybeObject* maybe_code = compiler.CompileLoadPixelArray(receiver);
+ if (!maybe_code->ToObject(&code)) return maybe_code;
+ }
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+ Object* result;
+ { MaybeObject* maybe_result =
+ receiver->UpdateMapCodeCache(name, Code::cast(code));
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ }
+ return code;
+}
+
+
MaybeObject* StubCache::ComputeStoreField(String* name,
JSObject* receiver,
int field_index,
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 07f894a512..6d78279833 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -133,6 +133,9 @@ class StubCache : public AllStatic {
MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
JSObject* receiver);
+ MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadPixelArray(
+ JSObject* receiver);
+
// ---
MUST_USE_RESULT static MaybeObject* ComputeStoreField(String* name,
@@ -607,6 +610,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* CompileLoadPixelArray(JSObject* receiver);
private:
MaybeObject* GetCode(PropertyType type, String* name);
diff --git a/deps/v8/src/third_party/strongtalk/README.chromium b/deps/v8/src/third_party/strongtalk/README.chromium
deleted file mode 100644
index ba2b789f38..0000000000
--- a/deps/v8/src/third_party/strongtalk/README.chromium
+++ /dev/null
@@ -1,18 +0,0 @@
-Name: Strongtalk
-URL: http://www.strongtalk.org/
-
-Code from the Strongtalk assembler is used with modification in the following
-files:
-
-src/assembler.h
-src/assembler.cc
-src/arm/assembler-arm.cc
-src/arm/assembler-arm.h
-src/arm/assembler-arm-inl.h
-src/ia32/assembler-ia32.cc
-src/ia32/assembler-ia32.h
-src/ia32/assembler-ia32-inl.h
-src/mips/assembler-mips.cc
-src/mips/assembler-mips.h
-src/mips/assembler-mips-inl.h
-src/x64/assembler-x64.h
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index fb890d2342..776d9f3bc1 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -155,38 +155,6 @@ namespace internal {
K(WHILE, "while", 0) \
K(WITH, "with", 0) \
\
- /* Future reserved words (ECMA-262, section 7.5.3, page 14). */ \
- F(ABSTRACT, "abstract", 0) \
- F(BOOLEAN, "boolean", 0) \
- F(BYTE, "byte", 0) \
- F(CHAR, "char", 0) \
- F(CLASS, "class", 0) \
- K(CONST, "const", 0) \
- F(DOUBLE, "double", 0) \
- F(ENUM, "enum", 0) \
- F(EXPORT, "export", 0) \
- F(EXTENDS, "extends", 0) \
- F(FINAL, "final", 0) \
- F(FLOAT, "float", 0) \
- F(GOTO, "goto", 0) \
- F(IMPLEMENTS, "implements", 0) \
- F(IMPORT, "import", 0) \
- F(INT, "int", 0) \
- F(INTERFACE, "interface", 0) \
- F(LONG, "long", 0) \
- K(NATIVE, "native", 0) \
- F(PACKAGE, "package", 0) \
- F(PRIVATE, "private", 0) \
- F(PROTECTED, "protected", 0) \
- F(PUBLIC, "public", 0) \
- F(SHORT, "short", 0) \
- F(STATIC, "static", 0) \
- F(SUPER, "super", 0) \
- F(SYNCHRONIZED, "synchronized", 0) \
- F(THROWS, "throws", 0) \
- F(TRANSIENT, "transient", 0) \
- F(VOLATILE, "volatile", 0) \
- \
/* Literals (ECMA-262, section 7.8, page 16). */ \
K(NULL_LITERAL, "null", 0) \
K(TRUE_LITERAL, "true", 0) \
@@ -197,6 +165,11 @@ namespace internal {
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, NULL, 0) \
\
+ /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
+ T(FUTURE_RESERVED_WORD, NULL, 0) \
+ K(CONST, "const", 0) \
+ K(NATIVE, "native", 0) \
+ \
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
\
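The practical effect of collapsing the list: the ES3-only future reserved
words (abstract, boolean, double, goto, and the rest) stop being reserved
entirely, while the remaining set scans as the single FUTURE_RESERVED_WORD
token and is rejected only where strict mode demands it. For example (plain
JavaScript, under this patch):

  var double = 1;    // dropped from the token list; a plain identifier now
  var package = 2;   // scans as FUTURE_RESERVED_WORD; accepted in sloppy code
  // "use strict"; var package = 2;   // SyntaxError: strict_reserved_word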
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index 5b0fd6157c..9d8aa8227f 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -32,11 +32,11 @@
#include "compilation-cache.h"
#include "frames-inl.h"
#include "runtime-profiler.h"
-#include "simulator.h"
namespace v8 {
namespace internal {
+class Simulator;
#define RETURN_IF_SCHEDULED_EXCEPTION() \
if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index c7029c8823..34ff58452f 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -120,9 +120,9 @@ class TypeInfo {
}
- // Integer32 is an integer that can be represented as either a signed
- // 32-bit integer or as an unsigned 32-bit integer. It has to be
- // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+ // Integer32 is an integer that can be represented as a signed
+ // 32-bit integer. It has to be in the range [-2^31, 2^31 - 1].
+ // We also have to check for negative 0
// as it is not an Integer32.
static inline bool IsInt32Double(double value) {
const DoubleRepresentation minus_zero(-0.0);
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 3adab83d87..179fa92863 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -205,7 +205,7 @@ function Decode(uri, reserved) {
octets[0] = cc;
if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
for (var i = 1; i < n; i++) {
- k++;
+ if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
octets[i] = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
}
index = URIDecodeOctets(octets, result, index);
@@ -412,4 +412,3 @@ function SetupURI() {
}
SetupURI();
-
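The added check requires every continuation byte of a multi-byte escape to
start with '%'; the old code advanced past that position blindly and could
read its hex digits from the wrong characters. Illustration (plain
JavaScript):

  decodeURIComponent("%C3%A9");  // "é": a well-formed two-byte sequence
  decodeURIComponent("%C3xA9");  // URIError: URI malformed -- previously
                                 // the 'x' was skipped as if it were a '%'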
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 3f27114bec..85bd17e0b1 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -469,6 +469,12 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
ARMv7 = 2, // ARM
SAHF = 0}; // x86
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+enum StrictModeFlag {
+ kNonStrictMode,
+ kStrictMode
+};
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 233f8b4de9..b0fb5bf171 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -491,28 +491,29 @@ PropertyDescriptor.prototype.hasSetter = function() {
}
+// Converts an array returned from Runtime_GetOwnProperty to an actual
+// property descriptor. For a description of the array layout please
+// see the runtime.cc file.
+function ConvertDescriptorArrayToDescriptor(desc_array) {
+ if (desc_array == false) {
+ throw 'Internal error: invalid desc_array';
+ }
-// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
- var desc = new PropertyDescriptor();
-
- // GetOwnProperty returns an array indexed by the constants
- // defined in macros.py.
- // If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), ToString(p));
-
- if (IS_UNDEFINED(props)) return void 0;
+ if (IS_UNDEFINED(desc_array)) {
+ return void 0;
+ }
- // This is an accessor
- if (props[IS_ACCESSOR_INDEX]) {
- desc.setGet(props[GETTER_INDEX]);
- desc.setSet(props[SETTER_INDEX]);
+ var desc = new PropertyDescriptor();
+ // This is an accessor.
+ if (desc_array[IS_ACCESSOR_INDEX]) {
+ desc.setGet(desc_array[GETTER_INDEX]);
+ desc.setSet(desc_array[SETTER_INDEX]);
} else {
- desc.setValue(props[VALUE_INDEX]);
- desc.setWritable(props[WRITABLE_INDEX]);
+ desc.setValue(desc_array[VALUE_INDEX]);
+ desc.setWritable(desc_array[WRITABLE_INDEX]);
}
- desc.setEnumerable(props[ENUMERABLE_INDEX]);
- desc.setConfigurable(props[CONFIGURABLE_INDEX]);
+ desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
+ desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
return desc;
}
@@ -535,9 +536,27 @@ function HasProperty(obj, p) {
}
+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
+ // GetOwnProperty returns an array indexed by the constants
+ // defined in macros.py.
+ // If p is not a property on obj undefined is returned.
+ var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+ // A false value here means that access checks failed.
+ if (props == false) return void 0;
+
+ return ConvertDescriptorArrayToDescriptor(props);
+}
+
+
// ES5 8.12.9.
function DefineOwnProperty(obj, p, desc, should_throw) {
- var current = GetOwnProperty(obj, p);
+ var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
+ // A false value here means that access checks failed.
+ if (current_or_access == false) return void 0;
+
+ var current = ConvertDescriptorArrayToDescriptor(current_or_access);
var extensible = %IsExtensible(ToObject(obj));
// Error handling according to spec.
@@ -545,10 +564,12 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (IS_UNDEFINED(current) && !extensible)
throw MakeTypeError("define_disallowed", ["defineProperty"]);
- if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+ if (!IS_UNDEFINED(current)) {
// Step 5 and 6
- if ((!desc.hasEnumerable() ||
- SameValue(desc.isEnumerable() && current.isEnumerable())) &&
+ if ((IsGenericDescriptor(desc) ||
+ IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
+ (!desc.hasEnumerable() ||
+ SameValue(desc.isEnumerable(), current.isEnumerable())) &&
(!desc.hasConfigurable() ||
SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() ||
@@ -561,30 +582,36 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
SameValue(desc.getSet(), current.getSet()))) {
return true;
}
-
- // Step 7
- if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- // Step 9
- if (IsDataDescriptor(current) != IsDataDescriptor(desc))
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- // Step 10
- if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- if (!current.isWritable() && desc.isWritable())
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- if (!current.isWritable() && desc.hasValue() &&
- !SameValue(desc.getValue(), current.getValue())) {
+ if (!current.isConfigurable()) {
+ // Step 7
+ if (desc.isConfigurable() ||
+ (desc.hasEnumerable() &&
+ desc.isEnumerable() != current.isEnumerable()))
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ // Step 8
+ if (!IsGenericDescriptor(desc)) {
+ // Step 9a
+ if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ // Step 10a
+ if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+ if (!current.isWritable() && desc.isWritable())
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (!current.isWritable() && desc.hasValue() &&
+ !SameValue(desc.getValue(), current.getValue())) {
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
+ }
+ // Step 11
+ if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+ if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
+ if (desc.hasGetter() && !SameValue(desc.getGet(), current.getGet()))
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
}
}
- // Step 11
- if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
- if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- }
- if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- }
}
// Send flags - enumerable and configurable are common - writable is
@@ -607,7 +634,16 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
} else
flag |= DONT_DELETE;
- if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+ if (IsDataDescriptor(desc) ||
+ (IsGenericDescriptor(desc) &&
+ (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
+ // There are 3 cases that lead here:
+ // Step 4a - defining a new data property.
+ // Steps 9b & 12 - replacing an existing accessor property with a data
+ // property.
+ // Step 12 - updating an existing data property with a data or generic
+ // descriptor.
+
if (desc.hasWritable()) {
flag |= desc.isWritable() ? 0 : READ_ONLY;
} else if (!IS_UNDEFINED(current)) {
@@ -615,20 +651,30 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
} else {
flag |= READ_ONLY;
}
+
var value = void 0; // Default value is undefined.
if (desc.hasValue()) {
value = desc.getValue();
- } else if (!IS_UNDEFINED(current)) {
+ } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
value = current.getValue();
}
+
%DefineOrRedefineDataProperty(obj, p, value, flag);
+ } else if (IsGenericDescriptor(desc)) {
+ // Step 12 - updating an existing accessor property with a generic
+ // descriptor. Changing flags only.
+ %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
} else {
- if (desc.hasGetter() &&
- (IS_FUNCTION(desc.getGet()) || IS_UNDEFINED(desc.getGet()))) {
- %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+ // There are 3 cases that lead here:
+ // Step 4b - defining a new accessor property.
+ // Steps 9c & 12 - replacing an existing data property with an accessor
+ // property.
+ // Step 12 - updating an existing accessor property with an accessor
+ // descriptor.
+ if (desc.hasGetter()) {
+ %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
}
- if (desc.hasSetter() &&
- (IS_FUNCTION(desc.getSet()) || IS_UNDEFINED(desc.getSet()))) {
+ if (desc.hasSetter()) {
%DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
}
}
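The checks in the hunk above follow the validation order of ES5 section
8.12.9 for a non-configurable property: flag changes are rejected first
(step 7), then a change of property kind (step 9a), then value and
writability changes on data properties (step 10a), and finally getter and
setter changes on accessor properties (step 11). A minimal C++ model of that
ordering, using a hypothetical flattened Descriptor type rather than V8's
actual property descriptor classes:

#include <optional>

// Hypothetical flattened descriptor, for illustration only.
struct Descriptor {
  std::optional<bool> configurable, enumerable, writable;
  bool has_value = false;                       // data descriptor field
  bool has_getter = false, has_setter = false;  // accessor fields
};

static bool IsData(const Descriptor& d) {
  return d.has_value || d.writable.has_value();
}
static bool IsAccessor(const Descriptor& d) {
  return d.has_getter || d.has_setter;
}
static bool IsGeneric(const Descriptor& d) {
  return !IsData(d) && !IsAccessor(d);
}

// Returns false where the script above throws redefine_disallowed; the
// value and accessor-identity comparisons of steps 10a and 11 are elided.
bool CanRedefineNonConfigurable(const Descriptor& current,
                                const Descriptor& desired) {
  // Step 7: may not regain configurability or flip enumerability.
  if (desired.configurable.value_or(false)) return false;
  if (desired.enumerable.has_value() &&
      *desired.enumerable != current.enumerable.value_or(false)) {
    return false;
  }
  // Step 8: a generic descriptor changes flags only, which is allowed.
  if (IsGeneric(desired)) return true;
  // Step 9a: may not switch between data and accessor kinds.
  if (IsData(current) != IsData(desired)) return false;
  // Step 10a: may not make a read-only data property writable again.
  if (IsData(current) && !current.writable.value_or(false) &&
      desired.writable.value_or(false)) {
    return false;
  }
  return true;
}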
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index e17f78047a..ac1887d55a 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 1
-#define BUILD_NUMBER 1
+#define BUILD_NUMBER 2
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 999306e378..ef069a98bc 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -916,6 +916,23 @@ void Assembler::call(const Operand& op) {
}
+// Calls directly to the given address using a relative offset.
+// Should only ever be used in Code objects for calls within the
+// same Code object. Should not be used when generating new code (use labels),
+// but only when patching existing code.
+void Assembler::call(Address target) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp.
+ emit(0xE8);
+ Address source = pc_ + 4;
+ intptr_t displacement = target - source;
+ ASSERT(is_int32(displacement));
+ emitl(static_cast<int32_t>(displacement));
+}
+
+
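An E8 call stores its target as a 32-bit offset relative to the address of
the next instruction. The opcode byte has already been emitted at this
point, so the next-instruction address is pc_ plus the four displacement
bytes still to be written, hence target - (pc_ + 4). The same computation
as a standalone sketch (the helper name is illustrative, not V8 API):

#include <stdint.h>

// Illustrative helper, not part of V8: rel32 for E8 (call rel32) is
// measured from the end of the 5-byte instruction, i.e. from the byte
// after the 4-byte displacement field.
int32_t CallDisplacement(uintptr_t after_opcode_byte, uintptr_t target) {
  intptr_t displacement = static_cast<intptr_t>(target) -
                          static_cast<intptr_t>(after_opcode_byte + 4);
  // The ASSERT(is_int32(...)) above enforces this range at patch time.
  return static_cast<int32_t>(displacement);
}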
void Assembler::clc() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -3012,6 +3029,16 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::movmskpd(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x50);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 29817a3161..c597783b5e 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -553,10 +553,12 @@ class Assembler : public Malloced {
// TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2;
- // The x64 JS return sequence is padded with int3 to make it large
- // enough to hold a call instruction when the debugger patches it.
+ // Some x64 JS code is padded with int3 to make it large
+ // enough to hold an instruction when the debugger patches it.
+ static const int kJumpInstructionLength = 13;
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
+ static const int kShortCallInstructionLength = 5;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
@@ -585,7 +587,7 @@ class Assembler : public Malloced {
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
+ // of m, where m must be a power of 2.
void Align(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -894,6 +896,10 @@ class Assembler : public Malloced {
arithmetic_op(0x0B, dst, src);
}
+ void orl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+
void or_(const Operand& dst, Register src) {
arithmetic_op(0x09, src, dst);
}
@@ -1057,6 +1063,18 @@ class Assembler : public Malloced {
arithmetic_op_32(0x33, dst, src);
}
+ void xorl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x33, dst, src);
+ }
+
+ void xorl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+
+ void xorl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+
void xor_(Register dst, const Operand& src) {
arithmetic_op(0x33, dst, src);
}
@@ -1111,6 +1129,12 @@ class Assembler : public Malloced {
void call(Label* L);
void call(Handle<Code> target, RelocInfo::Mode rmode);
+ // Calls directly to the given address using a relative offset.
+ // Should only ever be used in Code objects for calls within the
+ // same Code object. Should not be used when generating new code (use labels),
+ // but only when patching existing code.
+ void call(Address target);
+
// Call near absolute indirect, address in register
void call(Register adr);
@@ -1254,6 +1278,8 @@ class Assembler : public Malloced {
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ void movmskpd(Register dst, XMMRegister src);
+
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 40e6138677..b0cadda6d3 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1037,29 +1037,6 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
}
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- // Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
case TRBinaryOpIC::UNINITIALIZED:
@@ -1069,7 +1046,9 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
GenerateSmiStub(masm);
break;
case TRBinaryOpIC::INT32:
- GenerateInt32Stub(masm);
+ UNREACHABLE();
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
break;
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
@@ -1112,54 +1091,337 @@ const char* TypeRecordingBinaryOpStub::GetName() {
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- UNIMPLEMENTED();
-}
+ // We only generate heapnumber answers for overflowing calculations
+ // for the four basic arithmetic operations.
+ bool generate_inline_heapnumber_results =
+ (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+ (op_ == Token::ADD || op_ == Token::SUB ||
+ op_ == Token::MUL || op_ == Token::DIV);
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
+ // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+ Register left = rdx;
+ Register right = rax;
+
+
+ // Smi check of both operands. If op is BIT_OR, the check is delayed
+ // until after the OR operation.
+ Label not_smis;
+ Label use_fp_on_smis;
+ Label restore_MOD_registers; // Only used if op_ == Token::MOD.
+
+ if (op_ != Token::BIT_OR) {
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ // Perform the operation.
+ Comment perform_smi(masm, "-- Perform smi operation");
switch (op_) {
case Token::ADD:
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+
case Token::SUB:
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+
case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
case Token::DIV:
+ // SmiDiv will not accept left in rdx or right in rax.
+ left = rcx;
+ right = rbx;
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiDiv(rax, left, right, &use_fp_on_smis);
break;
+
case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
+ // SmiMod will not accept left in rdx or right in rax.
+ left = rcx;
+ right = rbx;
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiMod(rax, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::BIT_OR: {
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
+ break;
+ }
case Token::BIT_XOR:
- case Token::SAR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ __ movq(rax, left);
+ break;
+
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ __ movq(rax, left);
+ break;
+
case Token::SHR:
- GenerateRegisterArgsPush(masm);
+ __ SmiShiftLogicalRight(left, left, right, &not_smis);
+ __ movq(rax, left);
break;
+
default:
UNREACHABLE();
}
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
- } else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  // 5. Emit return of result in rax.
+ __ ret(0);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ // Restore left and right to rdx and rax.
+ __ movq(rdx, rcx);
+ __ movq(rax, rbx);
}
- __ bind(&call_runtime);
+
+
+ if (generate_inline_heapnumber_results) {
+ __ AllocateHeapNumber(rcx, rbx, slow);
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ }
+
+ // 7. Non-smi operands reach the end of the code generated by
+ // GenerateSmiCode, and fall through to subsequent code,
+ // with the operands in rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ if (op_ == Token::BIT_OR) {
+ __ movq(right, rcx);
+ }
+}
+
+
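The smi paths above depend on the x64 tagging scheme in which the 32-bit
payload occupies the upper half of the 64-bit word (kSmiValueSize == 32,
see the STATIC_ASSERT below), so Integer32ToSmi and SmiToInteger32 reduce
to 32-bit shifts, and an overflowing SmiAdd or SmiSub is caught by the
ordinary 64-bit overflow flag, which is what routes control to
use_fp_on_smis. A sketch of that encoding, assuming V8's upper-half layout:

#include <stdint.h>

// Assumed x64 smi layout: payload in bits 63..32, tag bits (zero) below.
intptr_t Integer32ToSmi(int32_t value) {
  // Shift via uint64_t to avoid undefined behavior on negative values.
  return static_cast<intptr_t>(
      static_cast<uint64_t>(static_cast<int64_t>(value)) << 32);
}

int32_t SmiToInteger32(intptr_t smi) {
  return static_cast<int32_t>(static_cast<uint64_t>(smi) >> 32);
}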
+void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
+ MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure) {
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
+ case Token::DIV: {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, allocation_failure);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we jump to the allocation_failure label, to call runtime.
+ __ jmp(allocation_failure);
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
+ heap_number_map);
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ __ Ret();
+
+ // Logical shift right can produce an unsigned int32 that is not
+ // an int32, and so is not in the smi range. Allocate a heap number
+ // in that case.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_shr_result);
+ Label allocation_failed;
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &allocation_failed,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ Ret();
+
+ __ bind(&allocation_failed);
+ // We need tagged values in rdx and rax for the following code,
+ // not int32 in rax and rcx.
+ __ Integer32ToSmi(rax, rcx);
+ __ Integer32ToSmi(rdx, rax);
+ __ jmp(allocation_failure);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ // No fall-through from this generated code.
+ if (FLAG_debug_code) {
+ __ Abort("Unexpected fall-through in "
+ "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+ }
+}
+
+
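Of the bitwise operations above, only SHR can produce a result outside the
smi range, and only for a shift count of zero: any count of one or more
clears the top bit, while a zero-count logical shift of a negative input
leaves a uint32 with the top bit set, too large for an int32 payload. A
standalone check of that claim:

#include <stdint.h>
#include <assert.h>

int main() {
  int32_t input = -1;
  uint32_t by_zero = static_cast<uint32_t>(input) >> 0;  // 0xFFFFFFFF
  uint32_t by_one = static_cast<uint32_t>(input) >> 1;   // 0x7FFFFFFF
  assert(by_zero > static_cast<uint32_t>(INT32_MAX));  // needs a heap number
  assert(by_one <= static_cast<uint32_t>(INT32_MAX));  // still fits a smi
  return 0;
}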
+void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ // Registers containing left and right operands respectively.
+ Register lhs = rdx;
+ Register rhs = rax;
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ __ JumpIfNotString(lhs, r8, &not_string1);
+
+  // First argument is a string, test second.
+ __ JumpIfSmi(rhs, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ JumpIfNotString(rhs, rhs, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ // Pop arguments, because CallRuntimeCode wants to push them again.
+ __ pop(rcx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ push(rcx);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
case Token::DIV:
- GenerateTypeTransition(masm);
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
@@ -1167,30 +1429,90 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smi;
+
+ GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
+
+ __ bind(&not_smi);
+ GenerateTypeTransition(masm);
}
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ GenerateStringAddCode(masm);
+
+ GenerateTypeTransition(masm);
}
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label gc_required, not_number;
+ GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ __ bind(&not_number);
+ GenerateTypeTransition(masm);
+
+ __ bind(&gc_required);
+ GenerateCallRuntimeCode(masm);
}
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label call_runtime, call_string_add_or_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateStringAddCode(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntimeCode(masm);
}
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
- UNIMPLEMENTED();
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in rdx is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+      // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rdx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ // Use object in rdx as a result holder
+ __ movq(rax, rdx);
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
}
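The overwrite mode records whether the operation may clobber one of its
operand objects with the result (for example, the left operand of a
compound assignment). A fresh heap number is then only needed when the
reusable operand is a smi, since smis are immediate values rather than heap
objects. A sketch of that decision, with illustrative names:

// Illustrative only; mirrors the switch above.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

// True when a fresh heap number must be allocated for the result.
bool NeedsAllocation(OverwriteMode mode,
                     bool left_is_smi,
                     bool right_is_smi) {
  switch (mode) {
    case OVERWRITE_LEFT:  return left_is_smi;   // rdx reusable if an object
    case OVERWRITE_RIGHT: return right_is_smi;  // rax reusable if an object
    case NO_OVERWRITE:    return true;
  }
  return true;
}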
@@ -1512,6 +1834,7 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
+// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
Register heap_number_map) {
@@ -1521,28 +1844,27 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label load_arg2, done;
__ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(r8, rdx);
__ jmp(&load_arg2);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
+ __ movl(r8, Immediate(0));
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in rcx.
- IntegerConvert(masm, rdx, rdx);
+ // Get the untagged integer version of the rdx heap number in rcx.
+ IntegerConvert(masm, r8, rdx);
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ // Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
__ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
+ __ SmiToInteger32(rcx, rax);
__ jmp(&done);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
@@ -1558,7 +1880,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
// Get the untagged integer version of the rax heap number in rcx.
IntegerConvert(masm, rcx, rax);
__ bind(&done);
- __ movl(rax, rdx);
+ __ movl(rax, r8);
}
@@ -1888,11 +2210,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
// Stack frame on entry.
- // esp[0]: return address
- // esp[8]: last_match_info (expected JSArray)
- // esp[16]: previous index
- // esp[24]: subject string
- // esp[32]: JSRegExp object
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
static const int kLastMatchInfoOffset = 1 * kPointerSize;
static const int kPreviousIndexOffset = 2 * kPointerSize;
@@ -2234,7 +2556,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
+ // Allocate RegExpResult followed by FixedArray with size in rbx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
@@ -2293,7 +2615,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
- __ j(less_equal, &done); // Jump if ecx is negative or zero.
+ __ j(less_equal, &done); // Jump if rcx is negative or zero.
__ subl(rbx, Immediate(1));
__ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
__ jmp(&loop);
@@ -2656,7 +2978,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// undefined, and are equal.
__ Set(rax, EQUAL);
__ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
+ // Return non-equal by returning the non-zero object pointer in rax,
// or return equal if we fell through to here.
__ ret(0);
__ bind(&not_both_objects);
@@ -3151,7 +3473,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
+ // If current RBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ movq(kScratchRegister, js_entry_sp);
__ cmpq(rbp, Operand(kScratchRegister, 0));
@@ -4414,6 +4736,53 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ jmp(rdi);
}
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+  //  elements - is set to the receiver's elements if
+  //             the receiver doesn't have a pixel array or the
+  //             key is not a smi, otherwise it's the elements'
+  //             external pointer.
+  //  untagged_key - is set to the untagged key.
+
+  // Some callers have already verified that the key is a smi. key_not_smi
+  // is set to NULL as a sentinel for that case. Otherwise, an explicit
+  // check is generated to ensure that the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiToInteger32(untagged_key, key);
+
+ // Verify that the receiver has pixel array elements.
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+ // Check that the smi is in range.
+ __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Load and tag the element as a smi.
+ __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
+ __ Integer32ToSmi(result, result);
+ __ ret(0);
+}
+
+
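The single cmpl/j(above_equal) pair above performs two checks at once:
reinterpreting a negative untagged key as unsigned yields a huge value, so
one unsigned bounds test rejects both negative keys and keys past the end.
The equivalent in portable form:

#include <stdint.h>

// Standalone illustration of the unsigned bounds trick.
bool PixelKeyInRange(int32_t untagged_key, uint32_t length) {
  return static_cast<uint32_t>(untagged_key) < length;
}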
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 9feced2f94..8051d4bdbe 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -270,6 +270,11 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure);
+ void GenerateStringAddCode(MacroAssembler* masm);
+ void GenerateCallRuntimeCode(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
@@ -447,6 +452,25 @@ class NumberToStringStub: public CodeStub {
};
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// then the generated code branches to key_not_smi. Callers can specify NULL
+// for key_not_smi to signal that a smi check has already been performed on
+// key so that the smi check is not generated. If key is not a valid index
+// within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 91686f9206..b8069a2cf9 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -5402,9 +5402,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump(&result);
slow.Bind();
@@ -5421,8 +5424,11 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index b308f64ce9..c283db3a07 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -357,6 +357,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 29b9023d42..ed6c47bf99 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -42,10 +42,65 @@ int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
+ return MacroAssembler::kCallInstructionLength;
}
+#ifdef DEBUG
+// Overwrites code with int3 instructions.
+static void ZapCodeRange(Address from, Address to) {
+ CHECK(from <= to);
+ int length = static_cast<int>(to - from);
+ CodePatcher destroyer(from, length);
+ while (length-- > 0) {
+ destroyer.masm()->int3();
+ }
+}
+#endif
+
+
+// Iterates through the entries of a SafepointTable that correspond to
+// deoptimization points.
+class SafepointTableDeoptimizationEntryIterator {
+ public:
+  explicit SafepointTableDeoptimizationEntryIterator(Code* code)
+ : code_(code), table_(code), index_(-1), limit_(table_.length()) {
+ FindNextIndex();
+ }
+
+ SafepointEntry Next(Address* pc) {
+ if (index_ >= limit_) {
+ *pc = NULL;
+ return SafepointEntry(); // Invalid entry.
+ }
+ *pc = code_->instruction_start() + table_.GetPcOffset(index_);
+ SafepointEntry entry = table_.GetEntry(index_);
+ FindNextIndex();
+ return entry;
+ }
+
+ private:
+ void FindNextIndex() {
+ ASSERT(index_ < limit_);
+ while (++index_ < limit_) {
+ if (table_.GetEntry(index_).deoptimization_index() !=
+ Safepoint::kNoDeoptimizationIndex) {
+ return;
+ }
+ }
+ }
+
+ Code* code_;
+ SafepointTable table_;
+  // Index of next deoptimization entry. If it has reached limit_ after
+  // calling FindNextIndex, there are no more, and Next will return an
+  // invalid SafepointEntry.
+ int index_;
+ // Table length.
+ int limit_;
+};
+
+
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
AssertNoAllocation no_allocation;
@@ -59,42 +114,74 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
code->InvalidateRelocation();
// For each return after a safepoint insert a absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
+ // corresponding deoptimization entry, or a short call to an absolute
+ // jump if space is short. The absolute jumps are put in a table just
+ // before the safepoint table (space was allocated there when the Code
+ // object was created, if necessary).
+
+ Address instruction_start = function->code()->instruction_start();
+ Address jump_table_address =
+ instruction_start + function->code()->safepoint_table_offset();
+ Address previous_pc = instruction_start;
+
+  SafepointTableDeoptimizationEntryIterator deoptimizations(function->code());
+ Address entry_pc = NULL;
+
+ SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
+ while (current_entry.is_valid()) {
+ int gap_code_size = current_entry.gap_code_size();
+ unsigned deoptimization_index = current_entry.deoptimization_index();
+
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
- unsigned instructions = pc_offset - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
+ ZapCodeRange(previous_pc, entry_pc);
#endif
- last_pc_offset = pc_offset;
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- last_pc_offset += gap_code_size;
- CodePatcher patcher(code->instruction_start() + last_pc_offset,
- patch_size());
+ // Position where Call will be patched in.
+ Address call_address = entry_pc + gap_code_size;
+ // End of call instruction, if using a direct call to a 64-bit address.
+ Address call_end_address =
+ call_address + MacroAssembler::kCallInstructionLength;
+
+ // Find next deoptimization entry, if any.
+ Address next_pc = NULL;
+ SafepointEntry next_entry = deoptimizations.Next(&next_pc);
+
+ if (!next_entry.is_valid() || next_pc >= call_end_address) {
+ // Room enough to write a long call instruction.
+ CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
RelocInfo::NONE);
- last_pc_offset += patch_size();
+ previous_pc = call_end_address;
+ } else {
+      // Not enough room for a long call instruction. Write a short call
+ // instruction to a long jump placed elsewhere in the code.
+ Address short_call_end_address =
+ call_address + MacroAssembler::kShortCallInstructionLength;
+ ASSERT(next_pc >= short_call_end_address);
+
+ // Write jump in jump-table.
+ jump_table_address -= MacroAssembler::kJumpInstructionLength;
+ CodePatcher jump_patcher(jump_table_address,
+ MacroAssembler::kJumpInstructionLength);
+ jump_patcher.masm()->Jump(
+ GetDeoptimizationEntry(deoptimization_index, LAZY),
+ RelocInfo::NONE);
+
+      // Write a short call to the jump-table entry at call_address.
+ CodePatcher call_patcher(call_address,
+ MacroAssembler::kShortCallInstructionLength);
+ call_patcher.masm()->call(jump_table_address);
+ previous_pc = short_call_end_address;
}
+
+ // Continue with next deoptimization entry.
+ current_entry = next_entry;
+ entry_pc = next_pc;
}
+
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
- CHECK(code->safepoint_table_start() >= last_pc_offset);
- unsigned instructions = code->safepoint_table_start() - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
+ ZapCodeRange(previous_pc, jump_table_address);
#endif
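The patching logic above chooses between two encodings: a 13-byte long call
(a 64-bit immediate move followed by an indirect call) and a 5-byte rel32
call into the jump table reserved before the safepoint table. The long form
is used only when it ends before the next deoptimization point. A sketch of
that decision, with constants mirroring kCallInstructionLength and
kShortCallInstructionLength (assumed values from the header above):

#include <stdint.h>

const int kLongCallSize = 13;  // movq reg, imm64; call reg (assumed)
const int kShortCallSize = 5;  // E8 rel32 into the jump table (assumed)

// True if the long call fits without overwriting the next deopt point.
bool CanUseLongCall(uintptr_t call_address,
                    bool has_next_deopt,
                    uintptr_t next_deopt_pc) {
  return !has_next_deopt || next_deopt_pc >= call_address + kLongCallSize;
}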
// Add the deoptimizing code to the list.
@@ -390,7 +477,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(Operand(rbx, offset));
}
- // Fill in the double input registers.
+ // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
@@ -404,7 +491,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ addq(rsp, Immediate(2 * kPointerSize));
}
- // Compute a pointer to the unwinding limit in register ecx; that is
+ // Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 92a8a7d3cd..f73f94845a 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1046,6 +1046,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x50) {
+ mnemonic = "movmskpd";
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index a2a0e7e9ff..998b3e9fc9 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -31,7 +31,7 @@
namespace v8 {
namespace internal {
-static const int kNumRegs = 8;
+static const int kNumRegs = 16;
static const RegList kJSCallerSaved =
1 << 0 | // rax
1 << 1 | // rcx
@@ -44,8 +44,7 @@ static const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
-// TODO(x64): This should not be 0.
-static const int kNumSafepointRegisters = 8;
+static const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 2a30bb8cfe..9144874c89 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -1529,14 +1529,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
__ j(smi, &smi_case);
__ bind(&stub_call);
- GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
- if (stub.ArgsInRegistersSupported()) {
- stub.GenerateCall(masm_, rdx, rcx);
- } else {
- __ push(rdx);
- __ push(rcx);
- __ CallStub(&stub);
- }
+ TypeRecordingBinaryOpStub stub(op, mode);
+ __ movq(rax, rcx);
+ __ CallStub(&stub);
__ jmp(&done);
__ bind(&smi_case);
@@ -1580,14 +1575,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
- GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
- if (stub.ArgsInRegistersSupported()) {
- __ pop(rdx);
- stub.GenerateCall(masm_, rdx, rax);
- } else {
- __ push(result_register());
- __ CallStub(&stub);
- }
+ TypeRecordingBinaryOpStub stub(op, mode);
+ __ pop(rdx);
+ __ CallStub(&stub);
context()->Plug(rax);
}
@@ -1934,7 +1924,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -3217,6 +3209,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// the first smi check before calling ToNumber.
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &done);
+
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
@@ -3230,12 +3223,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- GenericBinaryOpStub stub(expr->binary_op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- stub.GenerateCall(masm_, rax, Smi::FromInt(1));
- __ bind(&done);
+ TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ if (expr->op() == Token::INC) {
+ __ Move(rdx, Smi::FromInt(1));
+ } else {
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
+ }
+ __ CallStub(&stub);
+ __ bind(&done);
// Store the value returned in rax.
switch (assign_type) {
case VARIABLE:
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index d060e31ced..e5a4bfcfb5 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -580,20 +580,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&check_pixel_array);
- // Check whether the elements object is a pixel array.
- // rdx: receiver
- // rax: key
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rbx, rax); // Used on both directions of next branch.
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kPixelArrayMapRootIndex);
- __ j(not_equal, &check_number_dictionary);
- __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movzxbq(rax, Operand(rax, rbx, times_1, 0));
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
+ GenerateFastPixelArrayLoad(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rax,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
@@ -1703,7 +1698,8 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
}
void PatchInlinedSmiCode(Address address) {
- UNIMPLEMENTED();
+  // Patching of inline smi code is not implemented on X64,
+  // so there is nothing to do here.
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 4f1efc480b..48d413016b 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -53,7 +53,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -188,9 +188,19 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- // Ensure that patching a deoptimization point won't overwrite the table.
- for (int i = 0; i < Assembler::kCallInstructionLength; i++) {
- masm()->int3();
+  // Ensure that there is space at the end of the code to write a number
+  // of jump instructions, as well as room for writing a call near the
+  // end of the code.
+  // The jumps are used when there isn't room in the code stream to write
+  // a long call instruction; a shorter call to a jump instruction in the
+  // same code object is written instead.
+  // The calls are used when lazily deoptimizing a function: each
+  // deoptimization point may be patched with a call to a deoptimization
+  // entry.
+ int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
+ static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
+  int byte_count = short_deopts * MacroAssembler::kJumpInstructionLength;
+ while (byte_count-- > 0) {
+ __ int3();
}
safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
@@ -364,7 +374,13 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
- Abort("Unimplemented: %s", "CallRuntime");
+ ASSERT(instr != NULL);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ __ CallRuntime(function, num_arguments);
+ RegisterLazyDeoptimization(instr);
}
@@ -499,6 +515,7 @@ void LCodeGen::RecordSafepoint(
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
+
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@@ -599,11 +616,115 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- Abort("Unimplemented: %s", "DoBitI");}
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+
+ if (right->IsConstantOperand()) {
+ int right_operand = ToInteger32(LConstantOperand::cast(right));
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), Immediate(right_operand));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), Immediate(right_operand));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), Immediate(right_operand));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (right->IsStackSlot()) {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), ToOperand(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ ASSERT(right->IsRegister());
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), ToRegister(right));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), ToRegister(right));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), ToRegister(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
void LCodeGen::DoShiftI(LShiftI* instr) {
- Abort("Unimplemented: %s", "DoShiftI");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+ if (right->IsRegister()) {
+ ASSERT(ToRegister(right).is(rcx));
+
+ switch (instr->op()) {
+ case Token::SAR:
+ __ sarl_cl(ToRegister(left));
+ break;
+ case Token::SHR:
+ __ shrl_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ testl(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ __ shll_cl(ToRegister(left));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int value = ToInteger32(LConstantOperand::cast(right));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sarl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ case Token::SHR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ testl(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(negative, instr->environment());
+ } else {
+ __ shrl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ __ shll(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
}
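Masking the constant shift count with 0x1F matches both the hardware and
the language: x86 shift instructions use only the low five bits of the
count, and ECMAScript reduces shift counts modulo 32. That is also why the
zero-count SHR case still needs the negative-result deoptimization check
above. For instance:

#include <stdint.h>

// JavaScript's >>> semantics: the count is taken mod 32, as on x86.
uint32_t JsShiftRightLogical(uint32_t value, int32_t count) {
  return value >> (count & 0x1F);  // a count of 32 behaves like 0
}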
@@ -657,18 +778,22 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
+ ASSERT(instr->result()->IsRegister());
__ Move(ToRegister(instr->result()), instr->value());
}
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Abort("Unimplemented: %s", "DoJSArrayLength");
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
- Abort("Unimplemented: %s", "DoFixedArrayLength");
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
}
@@ -678,7 +803,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Abort("Unimplemented: %s", "DoBitNotI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->Equals(instr->result()));
+ __ not_(ToRegister(input));
}
@@ -1193,7 +1320,7 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1389,32 +1516,32 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
- Register result = ToRegister(instr->result());
- if (result.is(rax)) {
- __ load_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- }
- if (instr->hydrogen()->check_hole_value()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
+ Abort("Unimplemented: %s", "DoLoadGlobal");
}
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0));
- if (value.is(rax)) {
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(!value.is(temp));
+ bool check_hole = instr->hydrogen()->check_hole_value();
+ if (!check_hole && value.is(rax)) {
__ store_rax(instr->hydrogen()->cell().location(),
RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- __ movq(kScratchRegister,
- Handle<Object>::cast(instr->hydrogen()->cell()),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(Operand(kScratchRegister, 0), value);
+ return;
}
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ __ movq(temp,
+ Handle<Object>::cast(instr->hydrogen()->cell()),
+ RelocInfo::GLOBAL_PROPERTY_CELL);
+ if (check_hole) {
+ __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ __ movq(Operand(temp, 0), value);
}
@@ -1446,7 +1573,19 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Abort("Unimplemented: %s", "DoLoadElements");
+ ASSERT(instr->result()->Equals(instr->InputAt(0)));
+ Register reg = ToRegister(instr->InputAt(0));
+ __ movq(reg, FieldOperand(reg, JSObject::kElementsOffset));
+ if (FLAG_debug_code) {
+ NearLabel done;
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ j(equal, &done);
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Factory::fixed_cow_array_map());
+ __ Check(equal, "Check for fast elements failed.");
+ __ bind(&done);
+ }
}
@@ -1456,7 +1595,20 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+ Register elements = ToRegister(instr->elements());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(elements));
+
+ // Load the result.
+ __ movq(result, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Check for the hole value.
+ __ Cmp(result, Factory::the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
}
@@ -1520,12 +1672,43 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr) {
- Abort("Unimplemented: %s", "CallKnownFunction");
+ // Change context if needed.
+ bool change_context =
+ (graph()->info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ }
+
+ // Set rax to arguments count if adaption is not needed. Assumes that rax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ Set(rax, arity);
+ }
+
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ // Invoke function.
+ if (*function == *graph()->info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Setup deoptimization.
+ RegisterLazyDeoptimization(instr);
+
+ // Restore context.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- Abort("Unimplemented: %s", "DoCallConstantFunction");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ __ Move(rdi, instr->function());
+ CallKnownFunction(instr->function(), instr->arity(), instr);
}
@@ -1605,7 +1788,9 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallKnownGlobal");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ __ Move(rdi, instr->target());
+ CallKnownFunction(instr->target(), instr->arity(), instr);
}
@@ -1660,7 +1845,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Abort("Unimplemented: %s", "DoBoundsCheck");
+ if (instr->length()->IsRegister()) {
+ __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
+ } else {
+ __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
+ }
+ DeoptimizeIf(above_equal, instr->environment());
}
@@ -1779,13 +1969,73 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+ LTaggedToI* instr_;
+};
+
+
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Abort("Unimplemented: %s", "DoDeferredTaggedToI");
+ NearLabel done, heap_number;
+ Register input_reg = ToRegister(instr->InputAt(0));
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+
+ if (instr->truncating()) {
+ __ j(equal, &heap_number);
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movl(input_reg, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&heap_number);
+
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2siq(input_reg, xmm0);
+ __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ __ cmpl(input_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(not_equal, instr->environment());
+
+ XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2si(input_reg, xmm0);
+ __ cvtlsi2sd(xmm_temp, input_reg);
+ __ ucomisd(xmm0, xmm_temp);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(input_reg, input_reg);
+ __ j(not_zero, &done);
+ __ movmskpd(input_reg, xmm0);
+ __ andl(input_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ }
+ __ bind(&done);
}
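The non-truncating path above implements a lossless double-to-int32 test:
truncate with cvttsd2si, convert back, compare with ucomisd (a NaN
comparison raises the parity flag), and finally catch -0.0 by reading the
sign bit, which is exactly what movmskpd extracts into a general register.
The same logic in portable form, as an illustrative sketch:

#include <stdint.h>
#include <string.h>

// Portable sketch of the checks; the generated code uses SSE2 directly.
bool LosslessDoubleToInt32(double x, int32_t* out) {
  // The range test also rejects NaN, since NaN comparisons are false.
  if (!(x >= -2147483648.0 && x < 2147483648.0)) return false;
  int32_t i = static_cast<int32_t>(x);            // truncates, like cvttsd2si
  if (static_cast<double>(i) != x) return false;  // fractional part lost
  if (i == 0) {
    uint64_t bits;
    memcpy(&bits, &x, sizeof bits);
    if (bits >> 63) return false;                 // -0.0: sign bit set
  }
  *out = i;
  return true;
}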
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- Abort("Unimplemented: %s", "DoTaggedToI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ ASSERT(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+ DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
}
@@ -1811,7 +2061,33 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Abort("Unimplemented: %s", "DoCheckInstanceType");
+ Register input = ToRegister(instr->InputAt(0));
+ InstanceType first = instr->hydrogen()->first();
+ InstanceType last = instr->hydrogen()->last();
+
+ __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(first)));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+ // String has a dedicated bit in instance type.
+ __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(kIsNotStringMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(first)));
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(last)));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
}
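The interval check works because instance types are small integers arranged
so that related types are contiguous, and every string type has the
kIsNotStringMask bit clear, so the whole string range collapses to a single
bit test. A sketch with an assumed mask value (not taken from V8's headers):

#include <stdint.h>

const uint8_t kAssumedIsNotStringMask = 0x80;  // assumption, for illustration

// At most two byte compares for a contiguous [first, last] interval.
bool InInstanceTypeInterval(uint8_t type, uint8_t first, uint8_t last) {
  return type >= first && type <= last;
}

// One bit test covers every string instance type.
bool IsStringInstanceType(uint8_t type) {
  return (type & kAssumedIsNotStringMask) == 0;
}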
@@ -1866,12 +2142,47 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Abort("Unimplemented: %s", "DoArrayLiteral");
+ // Setup the parameters to the stub/runtime call.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->constant_elements());
+
+ // Pick the right runtime function or stub to call.
+ int length = instr->hydrogen()->length();
+ if (instr->hydrogen()->IsCopyOnWrite()) {
+ ASSERT(instr->hydrogen()->depth() == 1);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ } else {
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Abort("Unimplemented: %s", "DoObjectLiteral");
+ // Set up the parameters to the stub/runtime call.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->constant_properties());
+ __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+
+ // Pick the right runtime function to call.
+ if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ } else {
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ }
}
@@ -1881,7 +2192,20 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- Abort("Unimplemented: %s", "DoFunctionLiteral");
+ // Use the fast-case closure allocation code that allocates in new
+ // space for nested functions that don't need literal cloning.
+ Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (shared_info->num_literals() == 0 && !pretenure) {
+ FastNewClosureStub stub;
+ __ Push(shared_info);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(rsi);
+ __ Push(shared_info);
+ __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
}
@@ -1994,7 +2318,6 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
// Perform stack overflow check.
NearLabel done;
- ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 0a52c6da18..12b952d226 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
+#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
@@ -68,11 +69,35 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Inputs can either use a fixed register or have a short lifetime (be
+ // used at the start of the instruction).
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ LUnallocated::cast(operand)->IsUsedAtStart() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -162,6 +187,12 @@ const char* LArithmeticT::Mnemonic() const {
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@@ -262,7 +293,8 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
}
@@ -318,7 +350,7 @@ int LChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
@@ -386,7 +418,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -402,7 +434,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -670,7 +701,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -695,7 +729,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -740,8 +774,72 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- Abort("Unimplemented: %s", "DoBit");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+ ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+ HValue* right_value = instr->OperandAt(1);
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseFixed(right_value, rcx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ bool can_deopt = (op == Token::SHR && constant_value == 0);
+ if (can_deopt) {
+ bool can_truncate = true;
+ for (int i = 0; i < instr->uses()->length(); i++) {
+ if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+ can_truncate = false;
+ break;
+ }
+ }
+ can_deopt = !can_truncate;
+ }
+
+ LShiftI* result = new LShiftI(op, left, right, can_deopt);
+ return can_deopt
+ ? AssignEnvironment(DefineSameAsFirst(result))
+ : DefineSameAsFirst(result);
}
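
// A worked example (plain C++) of the deopt condition above: a logical
// shift right by 0 is the only case where the uint32 result can fall
// outside int32 range, since any nonzero shift amount clears the sign bit.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;                                 // all bits set
  uint32_t shr0 = static_cast<uint32_t>(x) >> 0;  // 4294967295: not an int32
  uint32_t shr1 = static_cast<uint32_t>(x) >> 1;  // 2147483647: still fits
  std::printf("%u %u\n", shr0, shr1);
  return 0;
}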
@@ -836,7 +934,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -859,11 +956,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -1006,8 +1099,9 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- Abort("Unimplemented: %s", "DoCompareMap");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new LCmpMapAndBranch(value);
}
@@ -1049,6 +1143,18 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ Abort("Unimplemented: DoContext");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ Abort("Unimplemented: DoOuterContext");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
return DefineAsRegister(new LGlobalObject);
}
@@ -1061,8 +1167,8 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- Abort("Unimplemented: %s", "DoCallConstantFunction");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
}
@@ -1079,8 +1185,8 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- Abort("Unimplemented: %s", "DoCallNamed");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
}
@@ -1091,8 +1197,8 @@ LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallKnownGlobal");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
}
@@ -1111,50 +1217,47 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- Abort("Unimplemented: %s", "DoCallRuntime");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
}
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- Abort("Unimplemented: %s", "DoShr");
- return NULL;
+ return DoShift(Token::SHR, instr);
}
LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- Abort("Unimplemented: %s", "DoSar");
- return NULL;
+ return DoShift(Token::SAR, instr);
}
LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- Abort("Unimplemented: %s", "DoShl");
- return NULL;
+ return DoShift(Token::SHL, instr);
}
LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- Abort("Unimplemented: %s", "DoBitAnd");
- return NULL;
+ return DoBit(Token::BIT_AND, instr);
}
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- Abort("Unimplemented: %s", "DoBitNot");
- return NULL;
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LBitNotI* result = new LBitNotI(input);
+ return DefineSameAsFirst(result);
}
LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- Abort("Unimplemented: %s", "DoBitOr");
- return NULL;
+ return DoBit(Token::BIT_OR, instr);
}
LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- Abort("Unimplemented: %s", "DoBitXor");
- return NULL;
+ return DoBit(Token::BIT_XOR, instr);
}
@@ -1305,14 +1408,14 @@ LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- Abort("Unimplemented: %s", "DoJSArrayLength");
- return NULL;
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
- Abort("Unimplemented: %s", "DoFixedArrayLength");
- return NULL;
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LFixedArrayLength(array));
}
@@ -1323,8 +1426,8 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- Abort("Unimplemented: %s", "DoBoundsCheck");
- return NULL;
+ return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+ Use(instr->length())));
}
@@ -1409,8 +1512,9 @@ LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- Abort("Unimplemented: %s", "DoCheckInstanceType");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LCheckInstanceType* result = new LCheckInstanceType(value);
+ return AssignEnvironment(result);
}
@@ -1472,7 +1576,10 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- return new LStoreGlobal(UseRegisterAtStart(instr->value()));}
+ LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
+ TempRegister());
+ return instr->check_hole_value() ? AssignEnvironment(result) : result;
+}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
@@ -1481,6 +1588,12 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
}
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ Abort("Unimplemented: DoStoreContextSlot");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
@@ -1502,15 +1615,19 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- Abort("Unimplemented: %s", "DoLoadElements");
- return NULL;
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineSameAsFirst(new LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
- return NULL;
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1572,14 +1689,12 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- Abort("Unimplemented: %s", "DoArrayLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
}
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- Abort("Unimplemented: %s", "DoObjectLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
}
@@ -1590,8 +1705,7 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- Abort("Unimplemented: %s", "DoFunctionLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
}
@@ -1699,7 +1813,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- Abort("Unimplemented: %s", "DoLeaveInlined");
+ HEnvironment* outer = current_block_->last_environment()->outer();
+ current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index abeb2a360d..64d141e335 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -287,7 +287,11 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
+
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -304,16 +308,14 @@ class LInstruction: public ZoneObject {
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -327,11 +329,35 @@ class LInstruction: public ZoneObject {
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
@@ -358,6 +384,11 @@ class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
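
// A minimal reconstruction (assuming the primary template elided from this
// hunk looks like the one below) of why the zero-operand specialization
// needs a dummy operator[]: the general template would otherwise declare a
// zero-length array, yet callers still need the member to exist.
#include <cassert>

template <typename T, int N>
struct Operands {
  int length() { return N; }
  T& operator[](int i) { assert(i >= 0 && i < N); return elems_[i]; }
  T elems_[N];  // ill-formed if N == 0, hence the specialization
};

template <typename T>
struct Operands<T, 0> {  // no elems_ member at all
  int length() { return 0; }
  T& operator[](int) {   // must never be reached at runtime
    assert(false);
    static T t = 0;
    return t;
  }
};

int main() { Operands<int, 2> ops; ops[0] = 1; return ops.length() - 2; }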
@@ -1269,10 +1300,11 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobal(LOperand* value) {
+ explicit LStoreGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
@@ -1280,12 +1312,16 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1602,11 +1638,10 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
+ explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
@@ -1781,7 +1816,7 @@ class LChunk: public ZoneObject {
pointer_maps_(8),
inlined_closures_(1) { }
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e104e5bb91..50b689b23f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -68,7 +68,9 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
}
-void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 3536911875..8bb3190448 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -74,7 +74,7 @@ class MacroAssembler: public Assembler {
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(Operand with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
void StoreRoot(Register source, Heap::RootListIndex index);
@@ -540,6 +540,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
+
+ // If object is a string, its map is loaded into object_map.
+ template <typename LabelType>
+ void JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string);
+
+
template <typename LabelType>
void JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
@@ -596,6 +604,12 @@ class MacroAssembler: public Assembler {
void Call(ExternalReference ext);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ // Emit a call to the code we are currently generating.
+ void CallSelf() {
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+ Call(self, RelocInfo::CODE_TARGET);
+ }
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -1458,6 +1472,8 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
+ // dst and src1 can be the same, because the one case that bails out
+ // is a shift by 0, which leaves dst, and therefore src1, unchanged.
NearLabel result_ok;
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
@@ -1592,6 +1608,17 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
template <typename LabelType>
+void MacroAssembler::JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string) {
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, not_string);
+ CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+ j(above_equal, not_string);
+}
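
// The check relies on the instance-type numbering assumed by this V8: all
// string types sort below FIRST_NONSTRING_TYPE. A host-side sketch of the
// same predicate (an illustration; the example numbering is hypothetical).
#include <cstdio>

static bool IsStringInstanceType(unsigned type, unsigned first_nonstring) {
  return type < first_nonstring;  // the inverse of j(above_equal, ...)
}

int main() {
  // Hypothetical numbering: string types occupy 0..63, non-strings 64+.
  std::printf("%d %d\n",
              IsStringInstanceType(5, 64),    // 1
              IsStringInstanceType(64, 64));  // 0
  return 0;
}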
+
+
+template <typename LabelType>
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 8888d70a6b..9cb88f36f6 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -2998,6 +2998,35 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ rdx,
+ rax,
+ rbx,
+ rcx,
+ rax,
+ &miss,
+ &miss,
+ &miss);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
// Specialized stub for constructing objects from functions which only have
// simple assignments of the form this.x = ...; in their body.
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index d7e446d1c3..9b070d2064 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -66,7 +66,7 @@ test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL
test-deoptimization/DeoptimizeBinaryOperationDIV: FAIL
test-deoptimization/DeoptimizeLoadICStoreIC: FAIL
test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL
-test-deoptimization/DeoptimizeCompare: FAIL
+test-deoptimization/DeoptimizeCompare: PASS || FAIL
# Tests that time out with crankshaft.
test-api/Threading: SKIP
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index ee620678a7..f88ba384d7 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -5309,11 +5309,13 @@ TEST(DetachAndReattachGlobal) {
}
+static bool allowed_access_type[v8::ACCESS_KEYS + 1] = { false };
static bool NamedAccessBlocker(Local<v8::Object> global,
Local<Value> name,
v8::AccessType type,
Local<Value> data) {
- return Context::GetCurrent()->Global()->Equals(global);
+ return Context::GetCurrent()->Global()->Equals(global) ||
+ allowed_access_type[type];
}
@@ -5321,7 +5323,8 @@ static bool IndexedAccessBlocker(Local<v8::Object> global,
uint32_t key,
v8::AccessType type,
Local<Value> data) {
- return Context::GetCurrent()->Global()->Equals(global);
+ return Context::GetCurrent()->Global()->Equals(global) ||
+ allowed_access_type[type];
}
@@ -5353,7 +5356,7 @@ static void UnreachableSetter(Local<String>, Local<Value>,
}
-THREADED_TEST(AccessControl) {
+TEST(AccessControl) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
@@ -5379,6 +5382,27 @@ THREADED_TEST(AccessControl) {
v8::Handle<v8::Object> global0 = context0->Global();
+ // Define a property with JS getter and setter.
+ CompileRun(
+ "function getter() { return 'getter'; };\n"
+ "function setter() { return 'setter'; }\n"
+ "Object.defineProperty(this, 'js_accessor_p', {get:getter, set:setter})");
+
+ Local<Value> getter = global0->Get(v8_str("getter"));
+ Local<Value> setter = global0->Get(v8_str("setter"));
+
+ // And define a normal element.
+ global0->Set(239, v8_str("239"));
+
+ // Define an element with JS getter and setter.
+ CompileRun(
+ "function el_getter() { return 'el_getter'; };\n"
+ "function el_setter() { return 'el_setter'; };\n"
+ "Object.defineProperty(this, '42', {get: el_getter, set: el_setter});");
+
+ Local<Value> el_getter = global0->Get(v8_str("el_getter"));
+ Local<Value> el_setter = global0->Get(v8_str("el_setter"));
+
v8::HandleScope scope1;
v8::Persistent<Context> context1 = Context::New();
@@ -5387,19 +5411,158 @@ THREADED_TEST(AccessControl) {
v8::Handle<v8::Object> global1 = context1->Global();
global1->Set(v8_str("other"), global0);
- v8::Handle<Value> value;
+ // Access blocked property.
+ CompileRun("other.blocked_prop = 1");
- // Access blocked property
- value = CompileRun("other.blocked_prop = 1");
- value = CompileRun("other.blocked_prop");
- CHECK(value->IsUndefined());
+ ExpectUndefined("other.blocked_prop");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
+ ExpectFalse("propertyIsEnumerable.call(other, 'blocked_prop')");
- value = CompileRun(
+ // Enable ACCESS_HAS.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other.blocked_prop");
+ // ... and now we can get the descriptor...
+ ExpectUndefined(
"Object.getOwnPropertyDescriptor(other, 'blocked_prop').value");
- CHECK(value->IsUndefined());
+ // ... and enumerate the property.
+ ExpectTrue("propertyIsEnumerable.call(other, 'blocked_prop')");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Access blocked element.
+ CompileRun("other[239] = 1");
+
+ ExpectUndefined("other[239]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239')");
+ ExpectFalse("propertyIsEnumerable.call(other, '239')");
+
+ // Enable ACCESS_HAS.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other[239]");
+ // ... and now we can get the descriptor...
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239').value");
+ // ... and enumerate the property.
+ ExpectTrue("propertyIsEnumerable.call(other, '239')");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Access a property with JS accessor.
+ CompileRun("other.js_accessor_p = 2");
+
+ ExpectUndefined("other.js_accessor_p");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p')");
+
+ // Enable ACCESS_HAS.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other.js_accessor_p");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_GET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+
+ ExpectString("other.js_accessor_p", "getter");
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectUndefined("other.js_accessor_p");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable ACCESS_HAS, ACCESS_GET and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectString("other.js_accessor_p", "getter");
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Access an element with JS accessor.
+ CompileRun("other[42] = 2");
+
+ ExpectUndefined("other[42]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42')");
+
+ // Enable ACCESS_HAS.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other[42]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_GET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+
+ ExpectString("other[42]", "el_getter");
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectUndefined("other[42]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable ACCESS_HAS, ACCESS_GET and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectString("other[42]", "el_getter");
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
- value = CompileRun("propertyIsEnumerable.call(other, 'blocked_prop')");
- CHECK(value->IsFalse());
+ v8::Handle<Value> value;
// Access accessible property
value = CompileRun("other.accessible_prop = 3");
@@ -7362,6 +7525,61 @@ static void GenerateSomeGarbage() {
"garbage = undefined;");
}
+v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) {
+ static int count = 0;
+ if (count++ % 3 == 0) {
+ v8::V8::LowMemoryNotification(); // This should move the stub.
+ GenerateSomeGarbage(); // This should ensure the old stub memory is flushed.
+ }
+ return v8::Handle<v8::Value>();
+}
+
+
+THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
+ nativeobject_templ->Set("callback",
+ v8::FunctionTemplate::New(DirectApiCallback));
+ v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
+ context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+ // Call the API function multiple times to ensure direct call stub creation.
+ CompileRun(
+ "function f() {"
+ " for (var i = 1; i <= 30; i++) {"
+ " nativeobject.callback();"
+ " }"
+ "}"
+ "f();");
+}
+
+
+v8::Handle<v8::Value> ThrowingDirectApiCallback(const v8::Arguments& args) {
+ return v8::ThrowException(v8_str("g"));
+}
+
+
+THREADED_TEST(CallICFastApi_DirectCall_Throw) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
+ nativeobject_templ->Set("callback",
+ v8::FunctionTemplate::New(ThrowingDirectApiCallback));
+ v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
+ context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+ // Call the API function multiple times to ensure direct call stub creation.
+ v8::Handle<Value> result = CompileRun(
+ "var result = '';"
+ "function f() {"
+ " for (var i = 1; i <= 5; i++) {"
+ " try { nativeobject.callback(); } catch (e) { result += e; }"
+ " }"
+ "}"
+ "f(); result;");
+ CHECK_EQ(v8_str("ggggg"), result);
+}
+
+
THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
v8::HandleScope scope;
@@ -10369,6 +10587,93 @@ THREADED_TEST(PixelArray) {
"i");
CHECK_EQ(255, result->Int32Value());
+ // Make sure that pixel array ICs recognize when a non-pixel array
+ // is passed to them.
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var j = 0; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels); }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(just_ints);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
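+ // (Expected sum: 0 + 1 + ... + 255 = 255 * 256 / 2 = 32640.)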
+
+ // Make sure that pixel array ICs recognize out-of-bounds accesses.
+ result = CompileRun("function pa_load(p, start) {"
+ " var sum = 0;"
+ " for (var j = start; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels,0); }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(pixels,-10);"
+ "}"
+ "result");
+ CHECK_EQ(0, result->Int32Value());
+
+ // Make sure that generic ICs properly handle a pixel array.
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var j = 0; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(just_ints); }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(pixels);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
+
+ // Make sure that generic load ICs recognize out-of-bounds accesses in
+ // pixel arrays.
+ result = CompileRun("function pa_load(p, start) {"
+ " var sum = 0;"
+ " for (var j = start; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(just_ints,0); }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels,0); }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(pixels,-10);"
+ "}"
+ "result");
+ CHECK_EQ(0, result->Int32Value());
+
+ // Make sure that generic ICs properly handle types other than pixel
+ // arrays (i.e., that the inlined fast pixel array check leaves the right
+ // information in the right registers).
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var j = 0; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(just_ints); }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels); }"
+ "sparse_array = new Object();"
+ "for (var i = 0; i < 256; ++i) { sparse_array[i] = i; }"
+ "sparse_array[1000000] = 3;"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(sparse_array);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
+
free(pixel_data);
}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index e5f73cb96a..43cf580a5a 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -409,71 +409,189 @@ TEST(6) {
}
-static void TestRoundingMode(int32_t mode, double value, int expected) {
+enum VCVTTypes {
+ s32_f64,
+ u32_f64
+};
+
+static void TestRoundingMode(VCVTTypes types,
+ VFPRoundingMode mode,
+ double value,
+ int expected,
+ bool expected_exception = false) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
- __ vmrs(r1);
- // Set custom FPSCR.
- __ bic(r2, r1, Operand(((mode ^ 3) << 22) | 0xf));
- __ orr(r2, r2, Operand(mode << 22));
- __ vmsr(r2);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
- // Load value, convert, and move back result to r0.
- __ vmov(d1, value);
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
- __ vmov(r0, s0);
+ Label wrong_exception;
+
+ __ vmrs(r1);
+ // Set custom FPSCR.
+ __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+ __ orr(r2, r2, Operand(mode));
+ __ vmsr(r2);
+
+ // Load value, convert, and move the result back to r0 if everything went well.
+ __ vmov(d1, value);
+ switch (types) {
+ case s32_f64:
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ case u32_f64:
+ __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Check for VFP exceptions.
+ __ vmrs(r2);
+ __ tst(r2, Operand(kVFPExceptionMask));
+ // Check that we behaved as expected.
+ __ b(&wrong_exception,
+ expected_exception ? eq : ne);
+ // There was no exception. Retrieve the result and return.
+ __ vmov(r0, s0);
+ __ mov(pc, Operand(lr));
- __ mov(pc, Operand(lr));
+ // The exception behaviour is not what we expected.
+ // Load a special value and return.
+ __ bind(&wrong_exception);
+ __ mov(r0, Operand(11223344));
+ __ mov(pc, Operand(lr));
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
- CHECK(code->IsCode());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ CHECK(code->IsCode());
#ifdef DEBUG
- Code::cast(code)->Print();
+ Code::cast(code)->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
- ::printf("res = %d\n", res);
- CHECK_EQ(expected, res);
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ ::printf("res = %d\n", res);
+ CHECK_EQ(expected, res);
+ }
}
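
// A host-side illustration (standard C++ <cfenv>, not ARM VFP) of the four
// rounding modes the assertions below exercise; std::lrint honours the
// current floating-point environment, mirroring RN/RP/RM/RZ.
#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  const double v = -123.7;
  std::fesetround(FE_TONEAREST);   // RN: round to nearest
  std::printf("RN -> %ld\n", std::lrint(v));  // -124
  std::fesetround(FE_UPWARD);      // RP: towards plus infinity
  std::printf("RP -> %ld\n", std::lrint(v));  // -123
  std::fesetround(FE_DOWNWARD);    // RM: towards minus infinity
  std::printf("RM -> %ld\n", std::lrint(v));  // -124
  std::fesetround(FE_TOWARDZERO);  // RZ: towards zero
  std::printf("RZ -> %ld\n", std::lrint(v));  // -123
  return 0;
}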
TEST(7) {
// Test vfp rounding modes.
- // See ARM DDI 0406B Page A2-29.
- enum FPSCRRoungingMode {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
- };
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- TestRoundingMode(RZ, 0.5, 0);
- TestRoundingMode(RZ, -0.5, 0);
- TestRoundingMode(RZ, 123.7, 123);
- TestRoundingMode(RZ, -123.7, -123);
- TestRoundingMode(RZ, 123456.2, 123456);
- TestRoundingMode(RZ, -123456.2, -123456);
-
- TestRoundingMode(RM, 0.5, 0);
- TestRoundingMode(RM, -0.5, -1);
- TestRoundingMode(RM, 123.7, 123);
- TestRoundingMode(RM, -123.7, -124);
- TestRoundingMode(RM, 123456.2, 123456);
- TestRoundingMode(RM, -123456.2, -123457);
- }
+ // s32_f64 (double to signed integer).
+
+ TestRoundingMode(s32_f64, RN, 0, 0);
+ TestRoundingMode(s32_f64, RN, 0.5, 0);
+ TestRoundingMode(s32_f64, RN, -0.5, 0);
+ TestRoundingMode(s32_f64, RN, 1.5, 2);
+ TestRoundingMode(s32_f64, RN, -1.5, -2);
+ TestRoundingMode(s32_f64, RN, 123.7, 124);
+ TestRoundingMode(s32_f64, RN, -123.7, -124);
+ TestRoundingMode(s32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RN, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);
+
+ TestRoundingMode(s32_f64, RM, 0, 0);
+ TestRoundingMode(s32_f64, RM, 0.5, 0);
+ TestRoundingMode(s32_f64, RM, -0.5, -1);
+ TestRoundingMode(s32_f64, RM, 123.7, 123);
+ TestRoundingMode(s32_f64, RM, -123.7, -124);
+ TestRoundingMode(s32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RM, -123456.2, -123457);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
+ TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);
+
+ TestRoundingMode(s32_f64, RZ, 0, 0);
+ TestRoundingMode(s32_f64, RZ, 0.5, 0);
+ TestRoundingMode(s32_f64, RZ, -0.5, 0);
+ TestRoundingMode(s32_f64, RZ, 123.7, 123);
+ TestRoundingMode(s32_f64, RZ, -123.7, -123);
+ TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);
+
+
+ // u32_f64 (double to unsigned integer).
+
+ // Negative values.
+ TestRoundingMode(u32_f64, RN, -0.5, 0);
+ TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RM, -0.5, 0, true);
+ TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RZ, -0.5, 0);
+ TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
+
+ // Positive values.
+ // kMaxInt is the maximum *signed* integer: 0x7fffffff.
+ static const uint32_t kMaxUInt = 0xffffffffu;
+ TestRoundingMode(u32_f64, RZ, 0, 0);
+ TestRoundingMode(u32_f64, RZ, 0.5, 0);
+ TestRoundingMode(u32_f64, RZ, 123.7, 123);
+ TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RM, 0, 0);
+ TestRoundingMode(u32_f64, RM, 0.5, 0);
+ TestRoundingMode(u32_f64, RM, 123.7, 123);
+ TestRoundingMode(u32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RN, 0, 0);
+ TestRoundingMode(u32_f64, RN, 0.5, 0);
+ TestRoundingMode(u32_f64, RN, 1.5, 2);
+ TestRoundingMode(u32_f64, RN, 123.7, 124);
+ TestRoundingMode(u32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
}
#undef __
diff --git a/deps/v8/test/cctest/test-bignum-dtoa.cc b/deps/v8/test/cctest/test-bignum-dtoa.cc
index 51a90574d7..a696ed8e3f 100644
--- a/deps/v8/test/cctest/test-bignum-dtoa.cc
+++ b/deps/v8/test/cctest/test-bignum-dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,7 +44,7 @@ using namespace v8::internal;
// Removes trailing '0' digits.
// Can return the empty string if all digits are 0.
static void TrimRepresentation(Vector<char> representation) {
- int len = strlen(representation.start());
+ int len = StrLength(representation.start());
int i;
for (i = len - 1; i >= 0; --i) {
if (representation[i] != '0') break;
diff --git a/deps/v8/test/cctest/test-dtoa.cc b/deps/v8/test/cctest/test-dtoa.cc
index ff0b660745..66c2aafc66 100644
--- a/deps/v8/test/cctest/test-dtoa.cc
+++ b/deps/v8/test/cctest/test-dtoa.cc
@@ -44,7 +44,7 @@ using namespace v8::internal;
// Removes trailing '0' digits.
static void TrimRepresentation(Vector<char> representation) {
- int len = strlen(representation.start());
+ int len = StrLength(representation.start());
int i;
for (i = len - 1; i >= 0; --i) {
if (representation[i] != '0') break;
diff --git a/deps/v8/test/cctest/test-fast-dtoa.cc b/deps/v8/test/cctest/test-fast-dtoa.cc
index 30d247658c..d311713c6d 100644
--- a/deps/v8/test/cctest/test-fast-dtoa.cc
+++ b/deps/v8/test/cctest/test-fast-dtoa.cc
@@ -19,7 +19,7 @@ static const int kBufferSize = 100;
// Removes trailing '0' digits.
static void TrimRepresentation(Vector<char> representation) {
- int len = strlen(representation.start());
+ int len = StrLength(representation.start());
int i;
for (i = len - 1; i >= 0; --i) {
if (representation[i] != '0') break;
diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status
index 47b6887887..0dd2750690 100644
--- a/deps/v8/test/es5conform/es5conform.status
+++ b/deps/v8/test/es5conform/es5conform.status
@@ -44,9 +44,6 @@ chapter11/11.1/11.1.5: UNIMPLEMENTED
# We do not have a global object called 'global' as required by tests.
chapter15/15.1: FAIL_OK
-# NOT IMPLEMENTED: bind
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED
-
# NaN is writable. We are compatible with JSC.
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
# Infinity is writable. We are compatible with JSC.
@@ -94,7 +91,7 @@ chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-2: FAIL_OK
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-3: FAIL_OK
-# SUBSETFAIL + we do not implement Function.prototype.bind.
+# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-4: FAIL_OK
# SUBSETFAIL
@@ -175,9 +172,6 @@ chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-34: FAIL_OK
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
-# NOT IMPLEMENTED: bind on Function.prototype.
-chapter15/15.3/15.3.4/15.3.4.5/15.3.4.5-0-1: UNIMPLEMENTED
-
# Bad test - the spec does not say anything about throwing errors
# on calling Array.prototype.indexOf with undefined as argument.
chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-1-1: FAIL_OK
@@ -235,6 +229,218 @@ chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-2: FAIL_OK
chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
+##############################################################################
+# Unimplemented parts of strict mode
+# Setting expectations to fail only so that the tests trigger as soon as
+# the strict mode feature gets implemented
+
+# A directive preceding a 'use strict' directive may not contain an OctalEscapeSequence
+# Incorrect test - need double escape in eval.
+chapter07/7.8/7.8.4/7.8.4-1-s: FAIL
+
+# this is not coerced to an object in strict mode (Number)
+chapter10/10.4/10.4.3/10.4.3-1-1-s: FAIL
+# this is not coerced to an object in strict mode (string)
+chapter10/10.4/10.4.3/10.4.3-1-2-s: FAIL
+# this is not coerced to an object in strict mode (undefined)
+chapter10/10.4/10.4.3/10.4.3-1-3-s: FAIL
+# this is not coerced to an object in strict mode (boolean)
+chapter10/10.4/10.4.3/10.4.3-1-4-s: FAIL
+
+# arguments[i] remains same after changing actual parameters in strict mode
+chapter10/10.6/10.6-10-c-ii-1-s: FAIL
+# arguments[i] doesn't map to actual parameters in strict mode
+chapter10/10.6/10.6-10-c-ii-2-s: FAIL
+
+# Accessing caller property of Arguments object throws TypeError in strict mode
+chapter10/10.6/10.6-13-b-1-s: FAIL
+# arguments.caller exists in strict mode
+chapter10/10.6/10.6-13-b-2-s: FAIL
+# arguments.caller is non-configurable in strict mode
+chapter10/10.6/10.6-13-b-3-s: FAIL
+# Accessing callee property of Arguments object throws TypeError in strict mode
+chapter10/10.6/10.6-13-c-1-s: FAIL
+# arguments.callee is non-configurable in strict mode
+chapter10/10.6/10.6-13-c-3-s: FAIL
+
+# simple assignment throws ReferenceError if LeftHandSide is an unresolvable reference in strict mode
+chapter11/11.13/11.13.1/11.13.1-1-5-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a property reference with a primitive base value (this is undefined)
+chapter11/11.13/11.13.1/11.13.1-1-7-s: FAIL
+
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.NaN)
+chapter11/11.13/11.13.1/11.13.1-4-2-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.Infinity)
+chapter11/11.13/11.13.1/11.13.1-4-3-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.length)
+chapter11/11.13/11.13.1/11.13.1-4-4-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Object.length)
+chapter11/11.13/11.13.1/11.13.1-4-5-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Function.length)
+chapter11/11.13/11.13.1/11.13.1-4-6-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Array.length)
+chapter11/11.13/11.13.1/11.13.1-4-7-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (String.length)
+chapter11/11.13/11.13.1/11.13.1-4-8-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Boolean.length)
+chapter11/11.13/11.13.1/11.13.1-4-9-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.length)
+chapter11/11.13/11.13.1/11.13.1-4-10-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Date.length)
+chapter11/11.13/11.13.1/11.13.1-4-11-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (RegExp.length)
+chapter11/11.13/11.13.1/11.13.1-4-12-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Error.length)
+chapter11/11.13/11.13.1/11.13.1-4-13-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.MAX_VALUE)
+chapter11/11.13/11.13.1/11.13.1-4-14-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.MIN_VALUE)
+chapter11/11.13/11.13.1/11.13.1-4-15-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.NaN)
+chapter11/11.13/11.13.1/11.13.1-4-16-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.NEGATIVE_INFINITY)
+chapter11/11.13/11.13.1/11.13.1-4-17-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.POSITIVE_INFINITY)
+chapter11/11.13/11.13.1/11.13.1-4-18-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.E)
+chapter11/11.13/11.13.1/11.13.1-4-19-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LN10)
+chapter11/11.13/11.13.1/11.13.1-4-20-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LN2)
+chapter11/11.13/11.13.1/11.13.1-4-21-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LOG2E)
+chapter11/11.13/11.13.1/11.13.1-4-22-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LOG10E)
+chapter11/11.13/11.13.1/11.13.1-4-23-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.PI)
+chapter11/11.13/11.13.1/11.13.1-4-24-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.SQRT1_2)
+chapter11/11.13/11.13.1/11.13.1-4-25-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.SQRT2)
+chapter11/11.13/11.13.1/11.13.1-4-26-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.undefined)
+chapter11/11.13/11.13.1/11.13.1-4-27-s: FAIL
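+# Illustration only (not part of the suite) - the failing pattern is:
+#   "use strict";
+#   Math.PI = 0;  // assigning to a read-only property must throw TypeError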
+
+# delete operator throws TypeError when deleting a non-configurable data property in strict mode
+chapter11/11.4/11.4.1/11.4.1-4.a-3-s: FAIL
+# delete operator throws TypeError when deleting a non-configurable data property in strict mode (Global.NaN)
+chapter11/11.4/11.4.1/11.4.1-4.a-4-s: FAIL
+# delete operator throws TypeError when deleting a non-configurable data property in strict mode (Math.LN2)
+chapter11/11.4/11.4.1/11.4.1-4.a-9-s: FAIL
+
+# delete operator throws ReferenceError when deleting a direct reference to a var in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-1-s: FAIL
+# delete operator throws ReferenceError when deleting a direct reference to a function argument in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-2-s: FAIL
+# delete operator throws ReferenceError when deleting a direct reference to a function name in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-3-s: FAIL
+# delete operator throws SyntaxError when deleting a direct reference to a function argument(object) in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-4-s: FAIL
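+# Illustration only (not part of the suite) - the two patterns under test:
+#   "use strict";
+#   delete Math.LN2;   // non-configurable property: must throw TypeError
+#   var v; delete v;   // direct reference: early SyntaxError per ES5 11.4.1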
+
+# eval - a function declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-1-s: FAIL
+# eval - a function assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-2-s: FAIL
+# eval - a function expr declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-3-s: FAIL
+# eval - a function expr assigning into 'eval' throws an EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-4-s: FAIL
+# eval - a Function declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-5-s: FAIL
+# eval - a Function assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-6-s: FAIL
+# eval - a direct eval declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-7-s: FAIL
+# eval - a direct eval assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-8-s: FAIL
+# eval - an indirect eval declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-9-s: FAIL
+# eval - an indirect eval assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-10-s: FAIL
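+# Illustration only: per ES5, strict code may neither declare nor assign a
+# binding named 'eval', and the violation is an early SyntaxError:
+#   eval("'use strict'; var eval = 1;");  // throws SyntaxError, not EvalError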
+
+# SyntaxError if eval used as function identifier in function declaration with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-3-s: FAIL
+# SyntaxError if eval used as function identifier in function expression with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-4-s: FAIL
+# SyntaxError if eval used as function identifier in function declaration in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-5-s: FAIL
+# SyntaxError if eval used as function identifier in function expression in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-6-s: FAIL
+# SyntaxError if arguments used as function identifier in function declaration with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-9-s: FAIL
+# SyntaxError if arguments used as function identifier in function expression with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-10-s: FAIL
+# SyntaxError if arguments used as function identifier in function declaration in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-11-s: FAIL
+# SyntaxError if arguments used as function identifier in function expression in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-12-s: FAIL
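+# Illustration only - the construct these tests exercise:
+#   function eval() { "use strict"; }  // must be rejected with SyntaxError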
+
+# 'use strict' directive - correct usage
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-1-s: FAIL
+# "use strict" directive - correct usage double quotes
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-2-s: FAIL
+# 'use strict' directive - may follow other directives
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-8-s: FAIL
+# 'use strict' directive - may occur multiple times
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-9-s: FAIL
+# other directives - may follow 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-10-s: FAIL
+# comments may precede 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-11-s: FAIL
+# comments may follow 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-12-s: FAIL
+# semicolon insertion works for 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-13-s: FAIL
+# semicolon insertion may come before 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-14-s: FAIL
+# blank lines may come before 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-15-s: FAIL
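+# Illustration only - the strict-mode behavior all of these depend on:
+#   function f() { "use strict"; return this; }
+#   f();  // yields undefined; this is not coerced to the global object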
+
+# Duplicate combined parameter name allowed in Function constructor called in strict mode if body not strict
+# Invalid test case per ECMA-262 5th Edition, 10.1.1, bullet 4
+chapter15/15.3/15.3.2/15.3.2.1/15.3.2.1-11-6-s: FAIL
+
+# Array.prototype.every - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1-s: FAIL
+# Array.prototype.some - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-5-1-s: FAIL
+# Array.prototype.forEach - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1-s: FAIL
+# Array.prototype.map - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1-s: FAIL
+# Array.prototype.filter - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.20/15.4.4.20-5-1-s: FAIL
+# Array.prototype.reduce - null passed as thisValue to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-ii-4-s: FAIL
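+# Illustration only - a strict callback receives its thisArg uncoerced:
+#   [1].forEach(function() { "use strict"; /* this is undefined here */ });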
+
[ $arch == mips ]
# Skip all tests on MIPS.
diff --git a/deps/v8/test/mjsunit/get-own-property-descriptor.js b/deps/v8/test/mjsunit/get-own-property-descriptor.js
index ceb7715384..79c1fac6ae 100644
--- a/deps/v8/test/mjsunit/get-own-property-descriptor.js
+++ b/deps/v8/test/mjsunit/get-own-property-descriptor.js
@@ -103,3 +103,19 @@ objWithProto.prototype = proto;
objWithProto[0] = 'bar';
var descWithProto = Object.getOwnPropertyDescriptor(objWithProto, '10');
assertEquals(undefined, descWithProto);
+
+// Test elements on global proxy object.
+var global = (function() { return this; })();
+
+global[42] = 42;
+
+function el_getter() { return 239; }
+function el_setter() {}
+Object.defineProperty(global, '239', {get: el_getter, set: el_setter});
+
+var descRegularElement = Object.getOwnPropertyDescriptor(global, '42');
+assertEquals(42, descRegularElement.value);
+
+var descAccessorElement = Object.getOwnPropertyDescriptor(global, '239');
+assertEquals(el_getter, descAccessorElement.get);
+assertEquals(el_setter, descAccessorElement.set);
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 057c0fa874..2448564608 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -112,6 +112,8 @@ regress/regress-3247124: SKIP
# Test that currently fails with crankshaft on ARM.
compiler/simple-osr: FAIL
+# BUG (1094)
+regress/regress-deopt-gc: SKIP
##############################################################################
[ $arch == x64 && $crankshaft ]
@@ -119,6 +121,8 @@ compiler/simple-osr: FAIL
# BUG (1026) This test is currently flaky.
compiler/simple-osr: SKIP
+# BUG (1094)
+regress/regress-deopt-gc: SKIP
##############################################################################
[ $arch == mips ]
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
index d24a4e5a39..a8a32130c4 100644
--- a/deps/v8/test/mjsunit/object-define-property.js
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -749,14 +749,33 @@ assertTrue(desc.writable);
assertTrue(desc.enumerable);
assertFalse(desc.configurable);
-// Ensure that we can't overwrite the non configurable element.
+// Can use defineProperty to change the value of a non-configurable
+// property.
try {
Object.defineProperty(obj6, '2', descElement);
+ desc = Object.getOwnPropertyDescriptor(obj6, '2');
+ assertEquals(desc.value, 'foobar');
+} catch (e) {
+ assertUnreachable();
+}
+
+// Ensure that we can't change the descriptor of a
+// non-configurable property.
+try {
+ var descAccessor = { get: function() { return 0; } };
+ Object.defineProperty(obj6, '2', descAccessor);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
+Object.defineProperty(obj6, '2', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(obj6, '2');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
Object.defineProperty(obj6, '3', descElementNonWritable);
desc = Object.getOwnPropertyDescriptor(obj6, '3');
assertEquals(desc.value, 'foofoo');
@@ -827,14 +846,33 @@ assertTrue(desc.writable);
assertTrue(desc.enumerable);
assertFalse(desc.configurable);
-// Ensure that we can't overwrite the non configurable element.
+// Can use defineProperty to change the value of a non-configurable
+// property of an array.
try {
Object.defineProperty(arr, '2', descElement);
+ desc = Object.getOwnPropertyDescriptor(arr, '2');
+ assertEquals(desc.value, 'foobar');
+} catch (e) {
+ assertUnreachable();
+}
+
+// Ensure that we can't change the descriptor of a
+// non-configurable property.
+try {
+ var descAccessor = { get: function() { return 0; } };
+ Object.defineProperty(arr, '2', descAccessor);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
+Object.defineProperty(arr, '2', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(arr, '2');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
Object.defineProperty(arr, '3', descElementNonWritable);
desc = Object.getOwnPropertyDescriptor(arr, '3');
assertEquals(desc.value, 'foofoo');
@@ -898,3 +936,98 @@ Object.defineProperty(o, "x", { writable: false });
assertEquals(undefined, o.x);
o.x = 37;
assertEquals(undefined, o.x);
+
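+// Redefines obj[propertyName] with desc and verifies that the resulting
+// descriptor matches the expected descriptor resultDesc field by field.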
+function testDefineProperty(obj, propertyName, desc, resultDesc) {
+ Object.defineProperty(obj, propertyName, desc);
+ var actualDesc = Object.getOwnPropertyDescriptor(obj, propertyName);
+ assertEquals(resultDesc.enumerable, actualDesc.enumerable);
+ assertEquals(resultDesc.configurable, actualDesc.configurable);
+ if (resultDesc.hasOwnProperty('value')) {
+ assertEquals(resultDesc.value, actualDesc.value);
+ assertEquals(resultDesc.writable, actualDesc.writable);
+    assertFalse(actualDesc.hasOwnProperty('get'));
+    assertFalse(actualDesc.hasOwnProperty('set'));
+ } else {
+ assertEquals(resultDesc.get, actualDesc.get);
+ assertEquals(resultDesc.set, actualDesc.set);
+    assertFalse(actualDesc.hasOwnProperty('value'));
+    assertFalse(actualDesc.hasOwnProperty('writable'));
+ }
+}
+
+// tests redefining existing property with a generic descriptor
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : true },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { configurable : true },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : false },
+ { value : 42, writable : true, enumerable : false, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { configurable : false },
+ { value : 42, writable : true, enumerable : true, configurable : false });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : true, configurable : true },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : true },
+ { value : 42, writable : true, enumerable : false, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : true, configurable : false },
+ { value : 42, writable : true, enumerable : true, configurable : false });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { value : 42, writable : true, enumerable : false, configurable : false });
+
+// can make a writable, non-configurable field non-writable
+o = { p : 42 };
+Object.defineProperty(o, 'p', { configurable: false });
+testDefineProperty(o, 'p',
+ { writable: false },
+ { value : 42, writable : false, enumerable : true, configurable : false });
+
+// redefine of get only property with generic descriptor
+o = {};
+Object.defineProperty(o, 'p',
+ { get : getter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { get: getter1, set: undefined, enumerable : false, configurable : false });
+
+// redefine of get/set only property with generic descriptor
+o = {};
+Object.defineProperty(o, 'p',
+ { get: getter1, set: setter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { get: getter1, set: setter1, enumerable : false, configurable : false });
+
+// redefine of set only property with generic descriptor
+o = {};
+Object.defineProperty(o, 'p',
+ { set : setter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { get: undefined, set: setter1, enumerable : false, configurable : false });
diff --git a/deps/v8/test/mjsunit/regress/regress-1083.js b/deps/v8/test/mjsunit/regress/regress-1083.js
new file mode 100644
index 0000000000..d231899b70
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1083.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that changing the generic descriptor flags on a property
+// on the global object doesn't break invariants.
+Object.defineProperty(this, 'Object', {enumerable:true});
+
+var desc = Object.getOwnPropertyDescriptor(this, 'Object');
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+assertFalse(desc.hasOwnProperty('get'));
+assertFalse(desc.hasOwnProperty('set'));
+assertTrue(desc.hasOwnProperty('value'));
+assertTrue(desc.writable);
diff --git a/deps/v8/test/mjsunit/regress/regress-1092.js b/deps/v8/test/mjsunit/regress/regress-1092.js
new file mode 100644
index 0000000000..0b29231a4b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1092.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that CodeGenerator::EmitKeyedPropertyAssignment for the start
+// of an initialization block doesn't normalize the properties of the
+// JSGlobalProxy.
+this.w = 0;
+this.x = 1;
+this.y = 2;
+this.z = 3;
+
diff --git a/deps/v8/test/mjsunit/regress/regress-1099.js b/deps/v8/test/mjsunit/regress/regress-1099.js
new file mode 100644
index 0000000000..0ed6ede4af
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1099.js
@@ -0,0 +1,46 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the LApplyArguments lithium instruction restores the context
+// after the call.
+
+function X() {
+  var slot = "foo";
+  return function (a) { return slot === a; };
+}
+
+function Y(x) {
+ var slot = "bar";
+ return function (a) {
+ x.apply(this, arguments);
+ return slot === 'bar';
+ };
+}
+
+var y = Y(X());
+
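+// Loop long enough for the optimizing compiler to kick in; the assertion
+// fails if the apply call does not restore the caller's context.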
+for (var i = 0; i < 1000000; i++) {
+ assertTrue(y("foo"));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-900966.js b/deps/v8/test/mjsunit/regress/regress-900966.js
index acffe7503d..99603c12ef 100644
--- a/deps/v8/test/mjsunit/regress/regress-900966.js
+++ b/deps/v8/test/mjsunit/regress/regress-900966.js
@@ -37,6 +37,8 @@ function f() {
}
f();
f();
+f();
+f();
assertTrue(2[11] === undefined);
Number.prototype[11] = 'y';
diff --git a/deps/v8/test/mjsunit/regress/regress-992.js b/deps/v8/test/mjsunit/regress/regress-992.js
new file mode 100644
index 0000000000..dbe25a5f63
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-992.js
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Object.defineProperty with generic desc on existing property
+// should just update enumerable/configurable flags.
+
+var obj = { get p() { return 42; } };
+var desc = Object.getOwnPropertyDescriptor(obj, 'p');
+var getter = desc.get;
+
+Object.defineProperty(obj, 'p', {enumerable: false });
+assertEquals(obj.p, 42);
+desc = Object.getOwnPropertyDescriptor(obj, 'p');
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertEquals(desc.get, getter);
+assertEquals(desc.set, undefined);
+assertFalse(desc.hasOwnProperty('value'));
+assertFalse(desc.hasOwnProperty('writable'));
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-gc.js b/deps/v8/test/mjsunit/regress/regress-deopt-gc.js
new file mode 100644
index 0000000000..7b7c29a31e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-gc.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// This tests that we can correctly handle a GC immediately after a function
+// has been deoptimized, even when we have an activation of this function on
+// the stack.
+
+// Ensure that there are code objects before the code for the opt_me function.
+(function() { var a = 10; a++; })();
+
+function opt_me() {
+ deopt();
+}
+
+function deopt() {
+ // Make sure we don't inline this function
+ try { var a = 42; } catch(o) {};
+ %DeoptimizeFunction(opt_me);
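+  // Force a GC while the deoptimized activation of opt_me is still on the
+  // stack.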
+ gc(true);
+}
+
+
+opt_me();
diff --git a/deps/v8/test/mjsunit/strict-mode-eval.js b/deps/v8/test/mjsunit/strict-mode-eval.js
new file mode 100644
index 0000000000..018ed9e075
--- /dev/null
+++ b/deps/v8/test/mjsunit/strict-mode-eval.js
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var code1 = "function f(eval) {}";
+var code2 = "function f(a, a) {}";
+var code3 = "var x = '\\020;'";
+var code4 = "function arguments() {}";
+
+// Verify the code compiles just fine in non-strict mode
+// (calling eval through an alias is an indirect eval, so the code is
+// evaluated outside this file's strict mode).
+var eval_alias = eval;
+
+eval_alias(code1);
+eval_alias(code2);
+eval_alias(code3);
+eval_alias(code4);
+
+function strict1() {
+ try {
+ eval(code1);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+
+ function strict2() {
+ try {
+ eval(code2);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+
+ function strict3() {
+ try {
+ eval(code3);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+
+ function strict4() {
+ try {
+ eval(code4);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+ }
+ strict4();
+ }
+ strict3();
+ }
+ strict2();
+}
+strict1();
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index 3cb6d32923..ddddfabee4 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -271,3 +271,68 @@ CheckStrictMode("function strict() { var x = --arguments; }", SyntaxError);
var y = [void arguments, typeof arguments,
+arguments, -arguments, ~arguments, !arguments];
})();
+
+// 7.6.1.2 Future Reserved Words
+var future_reserved_words = [
+ "class",
+ "enum",
+ "export",
+ "extends",
+ "import",
+ "super",
+ "implements",
+ "interface",
+ "let",
+ "package",
+ "private",
+ "protected",
+ "public",
+ "static",
+ "yield" ];
+
+function testFutureReservedWord(word) {
+ // Simple use of each reserved word
+ CheckStrictMode("var " + word + " = 1;", SyntaxError);
+
+ // object literal properties
+ eval("var x = { " + word + " : 42 };");
+ eval("var x = { get " + word + " () {} };");
+ eval("var x = { set " + word + " (value) {} };");
+
+ // object literal with string literal property names
+ eval("var x = { '" + word + "' : 42 };");
+ eval("var x = { get '" + word + "' () { } };");
+ eval("var x = { set '" + word + "' (value) { } };");
+ eval("var x = { get '" + word + "' () { 'use strict'; } };");
+ eval("var x = { set '" + word + "' (value) { 'use strict'; } };");
+
+ // Function names and arguments, strict and non-strict contexts
+ CheckStrictMode("function " + word + " () {}", SyntaxError);
+ CheckStrictMode("function foo (" + word + ") {}", SyntaxError);
+ CheckStrictMode("function foo (" + word + ", " + word + ") {}", SyntaxError);
+ CheckStrictMode("function foo (a, " + word + ") {}", SyntaxError);
+ CheckStrictMode("function foo (" + word + ", a) {}", SyntaxError);
+ CheckStrictMode("function foo (a, " + word + ", b) {}", SyntaxError);
+ CheckStrictMode("var foo = function (" + word + ") {}", SyntaxError);
+
+ // Function names and arguments when the body is strict
+ assertThrows("function " + word + " () { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (" + word + ") 'use strict'; {}", SyntaxError);
+ assertThrows("function foo (" + word + ", " + word + ") { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (a, " + word + ") { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (" + word + ", a) { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (a, " + word + ", b) { 'use strict'; }", SyntaxError);
+ assertThrows("var foo = function (" + word + ") { 'use strict'; }", SyntaxError);
+
+ // get/set when the body is strict
+ eval("var x = { get " + word + " () { 'use strict'; } };");
+ eval("var x = { set " + word + " (value) { 'use strict'; } };");
+ assertThrows("var x = { get foo(" + word + ") { 'use strict'; } };", SyntaxError);
+ assertThrows("var x = { set foo(" + word + ") { 'use strict'; } };", SyntaxError);
+}
+
+for (var i = 0; i < future_reserved_words.length; i++) {
+ testFutureReservedWord(future_reserved_words[i]);
+}
+
+
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index cfccc4603a..3b6a524c26 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -110,7 +110,7 @@ ecma_3/RegExp/regress-209067: PASS || FAIL if $mode == debug
js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
# http://b/issue?id=1206983
js1_5/Regress/regress-367561-03: PASS || FAIL if $mode == debug
-ecma/Date/15.9.5.10-2: PASS || FAIL if $mode == debug
+ecma/Date/15.9.5.10-2: PASS || (FAIL || TIMEOUT if $mode == debug)
# These tests create two Date objects just after each other and
# expect them to match. Sometimes this happens on the border
@@ -169,7 +169,6 @@ js1_5/Regress/regress-416628: PASS || FAIL || TIMEOUT if $mode == debug
# In Denmark the adjustment starts one week earlier!
# Tests based on shell that use dates in this gap are flaky.
ecma/Date/15.9.5.10-1: PASS || FAIL
-ecma/Date/15.9.5.10-2: PASS || TIMEOUT if ($arch == arm && $mode == debug)
ecma/Date/15.9.5.12-1: PASS || FAIL
ecma/Date/15.9.5.14: PASS || FAIL
ecma/Date/15.9.5.34-1: PASS || FAIL
@@ -821,20 +820,17 @@ js1_5/Regress/regress-271716-n: PASS || SKIP if $FAST == yes
js1_5/extensions/regress-342960: SKIP
# BUG(3251229): Times out when running new crankshaft test script.
-ecma/Date/15.9.5.12-2: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Date/15.9.5.10-2: SKIP
-ecma/Date/15.9.5.8: SKIP
ecma_3/RegExp/regress-311414: SKIP
-js1_5/Array/regress-99120-02: SKIP
-js1_5/Regress/regress-203278-1: SKIP
ecma/Date/15.9.5.8: SKIP
ecma/Date/15.9.5.10-2: SKIP
ecma/Date/15.9.5.11-2: SKIP
ecma/Date/15.9.5.12-2: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/extensions/regress-371636: SKIP
+js1_5/Regress/regress-203278-1: SKIP
js1_5/Regress/regress-404755: SKIP
js1_5/Regress/regress-451322: SKIP
-js1_5/extensions/regress-371636: SKIP
+
# BUG(1040): Allow this test to timeout.
js1_5/GC/regress-203278-2: PASS || TIMEOUT
@@ -865,7 +861,6 @@ ecma/Date/15.9.5.20: SKIP
ecma/Date/15.9.5.12-2: SKIP
ecma/Date/15.9.5.8: SKIP
ecma/Date/15.9.5.9: SKIP
-ecma/Date/15.9.5.10-2: SKIP
ecma/Date/15.9.5.11-2: SKIP
ecma/Expressions/11.7.2: SKIP
ecma/Expressions/11.10-2: SKIP
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index c5af0164d4..f73c7e141c 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -441,6 +441,7 @@
'../../src/lithium.h',
'../../src/lithium-allocator.cc',
'../../src/lithium-allocator.h',
+ '../../src/lithium-allocator-inl.h',
'../../src/liveedit.cc',
'../../src/liveedit.h',
'../../src/liveobjectlist-inl.h',
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index ceeebf9ab5..a980bd2dc5 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -689,6 +689,14 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\lithium.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\lithium-allocator.cc"
>
</File>
@@ -697,6 +705,10 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium-allocator-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ia32\lithium-ia32.cc"
>
</File>
@@ -713,6 +725,14 @@
>
</File>
<File
+ RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\liveedit.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index cd4c52eb26..6aa73da3ed 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -697,6 +697,10 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium-allocator-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\arm\lithium-arm.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index 2c7bf5ef86..c8ceaa256b 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -493,14 +493,6 @@
>
</File>
<File
- RelativePath="..\..\src\flow-graph.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\flow-graph.h"
- >
- </File>
- <File
RelativePath="..\..\src\frame-element.cc"
>
</File>
@@ -610,6 +602,22 @@
>
</File>
<File
+ RelativePath="..\..\src\hydrogen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\hydrogen.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\hydrogen-instructions.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\hydrogen-instructions.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\x64\ic-x64.cc"
>
</File>
@@ -682,6 +690,14 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\lithium.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\lithium-allocator.cc"
>
</File>
@@ -690,6 +706,34 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium-allocator-inl.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-x64.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-codegen-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-codegen-x64.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-gap-resolver-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-gap-resolver-x64.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\liveedit.cc"
>
</File>
@@ -806,6 +850,22 @@
>
</File>
<File
+ RelativePath="..\..\src\preparser.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\preparser.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\preparse-data.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\preparse-data.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\profile-generator.cc"
>
</File>
@@ -930,6 +990,14 @@
>
</File>
<File
+ RelativePath="..\..\src\scanner-base.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\scanner-base.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\scanner.cc"
>
</File>
@@ -1086,6 +1154,14 @@
>
</File>
<File
+ RelativePath="..\..\src\v8checks.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\v8globals.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\v8threads.cc"
>
</File>
@@ -1094,6 +1170,10 @@
>
</File>
<File
+ RelativePath="..\..\src\v8utils.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\variables.cc"
>
</File>
@@ -1157,6 +1237,22 @@
RelativePath="..\..\src\zone.h"
>
</File>
+ <File
+ RelativePath="..\..\src\extensions\externalize-string-extension.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\extensions\externalize-string-extension.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\extensions\gc-extension.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\extensions\gc-extension.h"
+ >
+ </File>
<Filter
Name="third party"
>
@@ -1197,6 +1293,10 @@
RelativePath="..\..\include\v8.h"
>
</File>
+ <File
+ RelativePath="..\..\include\v8stdint.h"
+ >
+ </File>
</Filter>
</Files>
<Globals>