Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc        |  186
-rw-r--r--  deps/v8/src/arm/assembler-arm.h         |  221
-rw-r--r--  deps/v8/src/arm/assembler-thumb2-inl.h  |  267
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.cc     | 1821
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.h      | 1027
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc          |   92
-rw-r--r--  deps/v8/src/arm/codegen-arm.h           |   21
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc           |   21
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc     |  350
-rw-r--r--  deps/v8/src/arm/frames-arm.cc           |    4
-rw-r--r--  deps/v8/src/arm/ic-arm.cc               |   10
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  |   30
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h   |    6
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc        |   20
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc       |   47
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc    |    3
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h     |    1
17 files changed, 3667 insertions, 460 deletions
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index d9247288ca..07da800903 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -114,55 +114,55 @@ CRegister cr15 = { 15 };
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
-Register s0 = { 0 };
-Register s1 = { 1 };
-Register s2 = { 2 };
-Register s3 = { 3 };
-Register s4 = { 4 };
-Register s5 = { 5 };
-Register s6 = { 6 };
-Register s7 = { 7 };
-Register s8 = { 8 };
-Register s9 = { 9 };
-Register s10 = { 10 };
-Register s11 = { 11 };
-Register s12 = { 12 };
-Register s13 = { 13 };
-Register s14 = { 14 };
-Register s15 = { 15 };
-Register s16 = { 16 };
-Register s17 = { 17 };
-Register s18 = { 18 };
-Register s19 = { 19 };
-Register s20 = { 20 };
-Register s21 = { 21 };
-Register s22 = { 22 };
-Register s23 = { 23 };
-Register s24 = { 24 };
-Register s25 = { 25 };
-Register s26 = { 26 };
-Register s27 = { 27 };
-Register s28 = { 28 };
-Register s29 = { 29 };
-Register s30 = { 30 };
-Register s31 = { 31 };
-
-Register d0 = { 0 };
-Register d1 = { 1 };
-Register d2 = { 2 };
-Register d3 = { 3 };
-Register d4 = { 4 };
-Register d5 = { 5 };
-Register d6 = { 6 };
-Register d7 = { 7 };
-Register d8 = { 8 };
-Register d9 = { 9 };
-Register d10 = { 10 };
-Register d11 = { 11 };
-Register d12 = { 12 };
-Register d13 = { 13 };
-Register d14 = { 14 };
-Register d15 = { 15 };
+SwVfpRegister s0 = { 0 };
+SwVfpRegister s1 = { 1 };
+SwVfpRegister s2 = { 2 };
+SwVfpRegister s3 = { 3 };
+SwVfpRegister s4 = { 4 };
+SwVfpRegister s5 = { 5 };
+SwVfpRegister s6 = { 6 };
+SwVfpRegister s7 = { 7 };
+SwVfpRegister s8 = { 8 };
+SwVfpRegister s9 = { 9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
+
+DwVfpRegister d0 = { 0 };
+DwVfpRegister d1 = { 1 };
+DwVfpRegister d2 = { 2 };
+DwVfpRegister d3 = { 3 };
+DwVfpRegister d4 = { 4 };
+DwVfpRegister d5 = { 5 };
+DwVfpRegister d6 = { 6 };
+DwVfpRegister d7 = { 7 };
+DwVfpRegister d8 = { 8 };
+DwVfpRegister d9 = { 9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
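
The hunk above retypes the VFP register constants: where the old code reused the
general-purpose Register struct for s0-s31 and d0-d15, the patch introduces
dedicated SwVfpRegister and DwVfpRegister types, so mixing core and VFP
registers in the vmov/vadd-style overloads below becomes a compile-time error.
A minimal sketch of the type safety this buys (hypothetical call site, not part
of the patch):

  void Example(Assembler* masm) {
    masm->vmov(s0, r0);      // ok: integer bits moved into an SwVfpRegister
    masm->vadd(d0, d1, d2);  // ok: all operands are DwVfpRegister
    // masm->vadd(d0, r1, d2);  // no longer compiles: r1 is a core Register
  }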
@@ -1371,11 +1371,10 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
-void Assembler::fmdrr(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
+void Assembler::vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond) {
// Dm = <Rt,Rt2>.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
@@ -1387,11 +1386,10 @@ void Assembler::fmdrr(const Register dst,
}
-void Assembler::fmrrd(const Register dst1,
- const Register dst2,
- const Register src,
- const SBit s,
- const Condition cond) {
+void Assembler::vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond) {
// <Rt,Rt2> = Dm.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
@@ -1403,9 +1401,8 @@ void Assembler::fmrrd(const Register dst1,
}
-void Assembler::fmsr(const Register dst,
+void Assembler::vmov(const SwVfpRegister dst,
const Register src,
- const SBit s,
const Condition cond) {
// Sn = Rt.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -1418,9 +1415,8 @@ void Assembler::fmsr(const Register dst,
}
-void Assembler::fmrs(const Register dst,
- const Register src,
- const SBit s,
+void Assembler::vmov(const Register dst,
+ const SwVfpRegister src,
const Condition cond) {
// Rt = Sn.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -1433,10 +1429,9 @@ void Assembler::fmrs(const Register dst,
}
-void Assembler::fsitod(const Register dst,
- const Register src,
- const SBit s,
- const Condition cond) {
+void Assembler::vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
@@ -1448,10 +1443,9 @@ void Assembler::fsitod(const Register dst,
}
-void Assembler::ftosid(const Register dst,
- const Register src,
- const SBit s,
- const Condition cond) {
+void Assembler::vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
// Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
@@ -1463,12 +1457,11 @@ void Assembler::ftosid(const Register dst,
}
-void Assembler::faddd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = faddd(Dn, Dm) double precision floating point addition.
+void Assembler::vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1479,12 +1472,11 @@ void Assembler::faddd(const Register dst,
}
-void Assembler::fsubd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+void Assembler::vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1495,12 +1487,11 @@ void Assembler::fsubd(const Register dst,
}
-void Assembler::fmuld(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+void Assembler::vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
@@ -1511,12 +1502,11 @@ void Assembler::fmuld(const Register dst,
}
-void Assembler::fdivd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = fdivd(Dn, Dm) double precision floating point division.
+void Assembler::vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
@@ -1527,8 +1517,8 @@ void Assembler::fdivd(const Register dst,
}
-void Assembler::fcmp(const Register src1,
- const Register src2,
+void Assembler::vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
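
Each of these emitters packs its operands into the bit layout quoted in its
comment. As a hedged illustration (the emit bodies themselves are elided from
this diff), the vadd layout would be assembled roughly like this for d0-d15,
where the D, N and M extension bits are all zero:

  // Assumption: standalone sketch of the VADD.F64 word per ARM DDI 0406A, A8-536.
  uint32_t EncodeVadd(uint32_t cond, int vd, int vn, int vm) {
    // cond(31-28) | 11100(27-23) | D=0(22) | 11(21-20) | Vn(19-16) |
    // Vd(15-12) | 101(11-9) | sz=1(8) | N=0(7) | 0(6) | M=0(5) | 0(4) | Vm(3-0)
    return cond | (0x1C << 23) | (0x3 << 20) | (vn << 16) |
           (vd << 12) | (0x5 << 9) | (1 << 8) | vm;
  }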
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 86bc18a247..cd53dd6097 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -103,57 +103,94 @@ extern Register sp;
extern Register lr;
extern Register pc;
-// Support for VFP registers s0 to s32 (d0 to d16).
-// Note that "sN:sM" is the same as "dN/2".
-extern Register s0;
-extern Register s1;
-extern Register s2;
-extern Register s3;
-extern Register s4;
-extern Register s5;
-extern Register s6;
-extern Register s7;
-extern Register s8;
-extern Register s9;
-extern Register s10;
-extern Register s11;
-extern Register s12;
-extern Register s13;
-extern Register s14;
-extern Register s15;
-extern Register s16;
-extern Register s17;
-extern Register s18;
-extern Register s19;
-extern Register s20;
-extern Register s21;
-extern Register s22;
-extern Register s23;
-extern Register s24;
-extern Register s25;
-extern Register s26;
-extern Register s27;
-extern Register s28;
-extern Register s29;
-extern Register s30;
-extern Register s31;
-
-extern Register d0;
-extern Register d1;
-extern Register d2;
-extern Register d3;
-extern Register d4;
-extern Register d5;
-extern Register d6;
-extern Register d7;
-extern Register d8;
-extern Register d9;
-extern Register d10;
-extern Register d11;
-extern Register d12;
-extern Register d13;
-extern Register d14;
-extern Register d15;
+
+// Single word VFP register.
+struct SwVfpRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 32; }
+ bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+ // Supporting d0 to d15, can be later extended to d31.
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;
+
// Coprocessor register
struct CRegister {
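
The new VFP register structs expose the same small interface as Register and
CRegister: code() supplies the field value for an instruction word and bit() a
single-bit mask. A hypothetical usage sketch (names illustrative, not from the
patch):

  DwVfpRegister dst = d7;
  uint32_t vd_field = dst.code() << 12;              // place Vd in bits 15-12
  uint32_t saved = d8.bit() | d9.bit() | d10.bit();  // register-set mask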
@@ -759,55 +796,45 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
- void fmdrr(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmrrd(const Register dst1,
- const Register dst2,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmsr(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
+ void vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond = al);
+ void vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
const Condition cond = al);
- void fmrs(const Register dst,
+ void vmov(const SwVfpRegister dst,
const Register src,
- const SBit s = LeaveCC,
const Condition cond = al);
- void fsitod(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void ftosid(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
-
- void faddd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fsubd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmuld(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fdivd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fcmp(const Register src1,
- const Register src2,
+ void vmov(const Register dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
+
+ void vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,
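
With the renaming, call sites read like ARM UAL assembly. A hedged sketch of
how an integer-to-double conversion might now be written (the actual call sites
live in codegen-arm.cc and fast-codegen-arm.cc from the diffstat; `__` is the
usual ACCESS_MASM shorthand):

  __ vmov(s15, r0);         // move the integer bit pattern into a VFP single
  __ vcvt(d7, s15);         // vcvt(DwVfpRegister, SwVfpRegister): int -> double
  __ vadd(d5, d6, d7, al);  // d5 = d6 + d7, explicitly unconditional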
diff --git a/deps/v8/src/arm/assembler-thumb2-inl.h b/deps/v8/src/arm/assembler-thumb2-inl.h
new file mode 100644
index 0000000000..3808ef00fa
--- /dev/null
+++ b/deps/v8/src/arm/assembler-thumb2-inl.h
@@ -0,0 +1,267 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
+#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
+
+#include "arm/assembler-thumb2.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(intptr_t delta) {
+ if (RelocInfo::IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p += delta; // relocate entry
+ }
+ // We do not use pc relative addressing on ARM, so there is
+ // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT(IsPatchedReturnSequence());
+ // The 2 instructions offset assumes patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(IsPatchedReturnSequence());
+ // The 2 instructions offset assumes patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+ return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(IsPatchedReturnSequence());
+ // The 2 instructions offset assumes patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // On ARM a "call instruction" is actually two instructions.
+ // mov lr, pc
+ // ldr pc, [pc, #XXX]
+ return (Assembler::instr_at(pc_) == kMovLrPc)
+ && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+ == kLdrPCPattern);
+}
+
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+}
+
+
+Operand::Operand(const char* s) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(s);
+ rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Object** opp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(opp);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Context** cpp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(cpp);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+}
+
+
+bool Operand::is_reg() const {
+ return rm_.is_valid() &&
+ rs_.is(no_reg) &&
+ shift_op_ == LSL &&
+ shift_imm_ == 0;
+}
+
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() >= next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+}
+
+
+Address Assembler::target_address_address_at(Address pc) {
+ Instr instr = Memory::int32_at(pc);
+ // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+ int offset = instr & 0xfff; // offset_12 is unsigned
+ if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
+ // Verify that the constant pool comes after the instruction referencing it.
+ ASSERT(offset >= -4);
+ return pc + offset + 8;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_at(Address constant_pool_entry,
+ Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_address_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to flush the instruction cache
+ // after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction was actually patched by the assignment
+ // above; the target address is not part of an instruction, it is patched in
+ // the constant pool and is read via a data access; the instruction accessing
+ // this address in the constant pool remains unchanged.
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_
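
The pc-relative decode in target_address_address_at above can be checked with a
worked example (hedged, not from the file): the word 0xe59f0008 encodes
"ldr r0, [pc, #8]".

  Instr instr = 0xe59f0008;                    // ldr r0, [pc, #8]
  ASSERT((instr & 0x0f7f0000) == 0x051f0000);  // matches the ldr pattern
  int offset = instr & 0xfff;                  // offset_12 == 8, U bit set
  // result: pc + 8 (offset) + 8 (pipeline prefetch) == address of pool slot

This is also why set_target_address_at needs no icache flush: only the
constant-pool word changes, never an instruction.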
diff --git a/deps/v8/src/arm/assembler-thumb2.cc b/deps/v8/src/arm/assembler-thumb2.cc
new file mode 100644
index 0000000000..6c2b9032fa
--- /dev/null
+++ b/deps/v8/src/arm/assembler-thumb2.cc
@@ -0,0 +1,1821 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "arm/assembler-thumb2-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// Safe default is no features.
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__)
+ // For the simulator=arm build, always use VFP since the arm simulator has
+ // VFP support.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+
+ if (OS::ArmCpuHasFeature(VFP3)) {
+ // This implementation also sets the VFP flags if
+ // runtime detection of VFP returns true.
+ supported_ |= 1u << VFP3;
+ found_by_runtime_probing_ |= 1u << VFP3;
+ }
+#endif
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+Register no_reg = { -1 };
+
+Register r0 = { 0 };
+Register r1 = { 1 };
+Register r2 = { 2 };
+Register r3 = { 3 };
+Register r4 = { 4 };
+Register r5 = { 5 };
+Register r6 = { 6 };
+Register r7 = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register fp = { 11 };
+Register ip = { 12 };
+Register sp = { 13 };
+Register lr = { 14 };
+Register pc = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0 = { 0 };
+CRegister cr1 = { 1 };
+CRegister cr2 = { 2 };
+CRegister cr3 = { 3 };
+CRegister cr4 = { 4 };
+CRegister cr5 = { 5 };
+CRegister cr6 = { 6 };
+CRegister cr7 = { 7 };
+CRegister cr8 = { 8 };
+CRegister cr9 = { 9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+SwVfpRegister s0 = { 0 };
+SwVfpRegister s1 = { 1 };
+SwVfpRegister s2 = { 2 };
+SwVfpRegister s3 = { 3 };
+SwVfpRegister s4 = { 4 };
+SwVfpRegister s5 = { 5 };
+SwVfpRegister s6 = { 6 };
+SwVfpRegister s7 = { 7 };
+SwVfpRegister s8 = { 8 };
+SwVfpRegister s9 = { 9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
+
+DwVfpRegister d0 = { 0 };
+DwVfpRegister d1 = { 1 };
+DwVfpRegister d2 = { 2 };
+DwVfpRegister d3 = { 3 };
+DwVfpRegister d4 = { 4 };
+DwVfpRegister d5 = { 5 };
+DwVfpRegister d6 = { 6 };
+DwVfpRegister d7 = { 7 };
+DwVfpRegister d8 = { 8 };
+DwVfpRegister d9 = { 9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-thumb2-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+ ASSERT(is_uint5(shift_imm));
+ ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ if (shift_op == RRX) {
+ // encoded as ROR with shift_imm == 0
+ ASSERT(shift_imm == 0);
+ shift_op_ = ROR;
+ shift_imm_ = 0;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+ ASSERT(shift_op != RRX);
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ rs_ = rs;
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+ rn_ = rn;
+ rm_ = no_reg;
+ offset_ = offset;
+ am_ = am;
+}
+
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+ am_ = am;
+}
+
+
+MemOperand::MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am) {
+ ASSERT(is_uint5(shift_imm));
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+enum {
+ H = 1 << 5, // halfword (or byte)
+ S6 = 1 << 6, // signed (or unsigned)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks
+ RdMask = 15 << 12, // in str instruction
+ CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+ // Reserved condition
+ nv = 15 << 28
+};
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+static const Instr kPopInstruction =
+ al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+static const Instr kPushRegPattern =
+ al | B26 | 4 | NegPreIndex | sp.code() * B16;
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+static const Instr kPopRegPattern =
+ al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ num_prinfo_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ last_const_pool_end_ = 0;
+ last_bound_pos_ = 0;
+ current_statement_position_ = RelocInfo::kNoPosition;
+ current_position_ = RelocInfo::kNoPosition;
+ written_statement_position_ = current_statement_position_;
+ written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // emit constant pool if necessary
+ CheckConstPool(true, false);
+ ASSERT(num_prinfo_ == 0);
+
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
+
+
+int Assembler::target_at(int pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~Imm24Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ return instr - (Code::kHeaderSize - kHeapObjectTag);
+ }
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ int imm26 = ((instr & Imm24Mask) << 8) >> 6;
+ if ((instr & CondMask) == nv && (instr & B24) != 0)
+ // blx uses bit 24 to encode bit 2 of imm26
+ imm26 += 2;
+
+ return pos + kPcLoadDelta + imm26;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~Imm24Mask) == 0) {
+ ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code* of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+ int imm26 = target_pos - (pos + kPcLoadDelta);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ if ((instr & CondMask) == nv) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ ASSERT((imm26 & 1) == 0);
+ instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ } else {
+ ASSERT((imm26 & 3) == 0);
+ instr &= ~Imm24Mask;
+ }
+ int imm24 = imm26 >> 2;
+ ASSERT(is_int24(imm24));
+ instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~Imm24Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ int cond = instr & CondMask;
+ const char* b;
+ const char* c;
+ if (cond == nv) {
+ b = "blx";
+ c = "";
+ } else {
+ if ((instr & B24) != 0)
+ b = "bl";
+ else
+ b = "b";
+
+ switch (cond) {
+ case eq: c = "eq"; break;
+ case ne: c = "ne"; break;
+ case hs: c = "hs"; break;
+ case lo: c = "lo"; break;
+ case mi: c = "mi"; break;
+ case pl: c = "pl"; break;
+ case vs: c = "vs"; break;
+ case vc: c = "vc"; break;
+ case hi: c = "hi"; break;
+ case ls: c = "ls"; break;
+ case ge: c = "ge"; break;
+ case lt: c = "lt"; break;
+ case gt: c = "gt"; break;
+ case le: c = "le"; break;
+ case al: c = ""; break;
+ default:
+ c = "";
+ UNREACHABLE();
+ }
+ }
+ PrintF("%s%s\n", b, c);
+ }
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ next(L); // call next before overwriting link with target at fixup_pos
+ target_at_put(fixup_pos, pos);
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // append appendix to L's list
+ int fixup_pos;
+ int link = L->pos();
+ do {
+ fixup_pos = link;
+ link = target_at(fixup_pos);
+ } while (link > 0);
+ ASSERT(link == kEndOfChain);
+ target_at_put(fixup_pos, appendix->pos());
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link > 0) {
+ L->link_to(link);
+ } else {
+ ASSERT(link == kEndOfChain);
+ L->Unuse();
+ }
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+static bool fits_shifter(uint32_t imm32,
+ uint32_t* rotate_imm,
+ uint32_t* immed_8,
+ Instr* instr) {
+ // imm32 must be unsigned
+ for (int rot = 0; rot < 16; rot++) {
+ uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+ if ((imm8 <= 0xff)) {
+ *rotate_imm = rot;
+ *immed_8 = imm8;
+ return true;
+ }
+ }
+ // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= 0x2*B21;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// We have to use the temporary register for things that can be relocated even
+// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+static bool MustUseIp(RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ return Serializer::enabled();
+ } else if (rmode == RelocInfo::NONE) {
+ return false;
+ }
+ return true;
+}
+
+
+void Assembler::addrmod1(Instr instr,
+ Register rn,
+ Register rd,
+ const Operand& x) {
+ CheckBuffer();
+ ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ if (!x.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if (MustUseIp(x.rmode_) ||
+ !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, so load
+ // it first to register ip and change the original instruction to use ip.
+ // However, if the original instruction is a 'mov rd, x' (not setting the
+ // condition code), then replace it with a 'ldr rd, [pc]'
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
+ Condition cond = static_cast<Condition>(instr & CondMask);
+ if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ ldr(ip, MemOperand(pc, 0), cond);
+ addrmod1(instr, rn, rd, Operand(ip));
+ }
+ return;
+ }
+ instr |= I | rotate_imm*B8 | immed_8;
+ } else if (!x.rs_.is_valid()) {
+ // immediate shift
+ instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ } else {
+ // register shift
+ ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+ instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+ }
+ emit(instr | rn.code()*B16 | rd.code()*B12);
+ if (rn.is(pc) || x.rm_.is(pc))
+ // block constant pool emission for one instruction after reading pc
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | B | L)) == B26);
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_12 = x.offset_;
+ if (offset_12 < 0) {
+ offset_12 = -offset_12;
+ am ^= U;
+ }
+ if (!is_uint12(offset_12)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_12 >= 0); // no masking needed
+ instr |= offset_12;
+ } else {
+ // register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // register offset the constructors make sure than both shift_imm_
+ // and shift_op_ are initialized
+ ASSERT(!x.rm_.is(pc));
+ instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT(x.rn_.is_valid());
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_8 = x.offset_;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ if (!is_uint8(offset_8)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_8 >= 0); // no masking needed
+ instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ } else if (x.shift_imm_ != 0) {
+ // scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ } else {
+ // register offset
+ ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ instr |= x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+ ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT(rl != 0);
+ ASSERT(!rn.is(pc));
+ emit(instr | rn.code()*B16 | rl);
+}
+
+
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+ // unindexed addressing is not encoded by this function
+ ASSERT_EQ((B27 | B26),
+ (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+ ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+ int am = x.am_;
+ int offset_8 = x.offset_;
+ ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
+ offset_8 >>= 2;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+
+ // post-indexed addressing requires W == 1; different than in addrmod2/3
+ if ((am & P) == 0)
+ am |= W;
+
+ ASSERT(offset_8 >= 0); // no masking needed
+ emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(pc_offset());
+ }
+
+ // Block the emission of the constant pool, since the branch instruction must
+ // be emitted at the pc offset recorded by the label
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+ return target_pos - (pc_offset() + kPcLoadDelta);
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(at_offset);
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+
+// Branch instructions
+void Assembler::b(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+ if (cond == al)
+ // dead code is a good location to emit the constant pool
+ CheckConstPool(false, false);
+}
+
+
+void Assembler::bl(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(int branch_offset) { // v5 and above
+ WriteRecordedPositions();
+ ASSERT((branch_offset & 1) == 0);
+ int h = ((branch_offset & 2) >> 1)*B24;
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(Register target, Condition cond) { // v5 and above
+ WriteRecordedPositions();
+ ASSERT(!target.is(pc));
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+ WriteRecordedPositions();
+ ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 4*B21 | s, src1, dst, src2);
+
+ // Eliminate pattern: push(r), pop()
+ // str(src, MemOperand(sp, 4, NegPreIndex), al);
+ // add(sp, sp, Operand(kPointerSize));
+ // Both instructions can be eliminated.
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // pattern
+ instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+ (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+ if (dst.is(pc)) {
+ WriteRecordedPositions();
+ }
+ addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ // dst goes in bits 16-19 for this instruction!
+ emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+void Assembler::clz(Register dst, Register src, Condition cond) {
+ // v5 and above.
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+ 15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+ ASSERT(!dst.is(pc));
+ emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+ Condition cond) {
+ ASSERT(fields >= B16 && fields < B20); // at least one field set
+ Instr instr;
+ if (!src.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if (MustUseIp(src.rmode_) ||
+ !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+ // immediate operand cannot be encoded, load it first to register ip
+ RecordRelocInfo(src.rmode_, src.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ msr(fields, Operand(ip), cond);
+ return;
+ }
+ instr = I | rotate_imm*B8 | immed_8;
+ } else {
+ ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ instr = src.rm_.code();
+ }
+ emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
+
+
+// Load/Store instructions
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+ if (dst.is(pc)) {
+ WriteRecordedPositions();
+ }
+ addrmod2(cond | B26 | L, dst, src);
+
+ // Eliminate pattern: push(r), pop(r)
+ // str(r, MemOperand(sp, 4, NegPreIndex), al)
+ // ldr(r, MemOperand(sp, 4, PostIndex), al)
+ // Both instructions can be eliminated.
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // pattern
+ instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
+ instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26, src, dst);
+
+ // Eliminate pattern: pop(), push(r)
+ // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
+ // -> str r, [sp, 0], al
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
+ instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
+ pc_ -= 2 * kInstrSize;
+ emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+ addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+void Assembler::ldm(BlockAddrMode am,
+ Register base,
+ RegList dst,
+ Condition cond) {
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+ addrmod4(cond | B27 | am | L, base, dst);
+
+ // emit the constant pool after a function return implemented by ldm ..{..pc}
+ if (cond == al && (dst & pc.bit()) != 0) {
+ // There is a slight chance that the ldm instruction was actually a call,
+ // in which case it would be wrong to return into the constant pool; we
+ // recognize this case by checking if the emission of the pool was blocked
+ // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+ // the case, we emit a jump over the pool.
+ CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+ }
+}
+
+
+void Assembler::stm(BlockAddrMode am,
+ Register base,
+ RegList src,
+ Condition cond) {
+ addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+void Assembler::swpb(Register dst,
+ Register src,
+ Register base,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+void Assembler::stop(const char* msg) {
+#if !defined(__arm__)
+ // The simulator handles these special instructions and stops execution.
+ emit(15 << 28 | ((intptr_t) msg));
+#else
+ // Just issue a simple break instruction for now. Alternatively we could use
+ // the swi(0x9f0001) instruction on Linux.
+ bkpt(0);
+#endif
+}
+
+
+void Assembler::bkpt(uint32_t imm16) { // v5 and above
+ ASSERT(is_uint16(imm16));
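+ // The BKPT encoding splits the immediate: the upper twelve bits go in
+ // instruction bits 19-8, the lower four in bits 3-0.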
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
+
+
+void Assembler::swi(uint32_t imm24, Condition cond) {
+ ASSERT(is_uint24(imm24));
+ emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+void Assembler::cdp(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+ crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mcr(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mrc(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Support for VFP.
+void Assembler::vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond) {
+ // Dm = <Rt,Rt2>.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src1.is(pc) && !src2.is(pc));
+ emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
+ src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+}
+
+
+void Assembler::vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // <Rt,Rt2> = Dm.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
+ dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+}
+
+
+void Assembler::vmov(const SwVfpRegister dst,
+ const Register src,
+ const Condition cond) {
+ // Sn = Rt.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src.is(pc));
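+ // The single-precision register index is split: bits 4-1 of dst.code()
+ // form Vn (instruction bits 19-16) and bit 0 forms N (bit 7).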
+ emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
+ src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+}
+
+
+void Assembler::vmov(const Register dst,
+ const SwVfpRegister src,
+ const Condition cond) {
+ // Rt = Sn.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst.is(pc));
+ emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
+ dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+}
+
+
+void Assembler::vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond) {
+ // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
+ dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
+ (0x1 & src.code())*B5 | (src.code() >> 1));
+}
+
+
+void Assembler::vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
+ 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
+ 0x5*B9 | B8 | B7 | B6 | src.code());
+}
+
+
+void Assembler::vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vadd(Dn, Dm) double precision floating point addition.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-536.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vsub(Dn, Dm) double precision floating point subtraction.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vmul(Dn, Dm) double precision floating point multiplication.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vdiv(Dn, Dm) double precision floating point division.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-584.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, Dm) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmrs(Register dst, Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-652.
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xF*B20 | B16 |
+ dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
+// Pseudo instructions
+void Assembler::lea(Register dst,
+ const MemOperand& x,
+ SBit s,
+ Condition cond) {
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.offset_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.offset_), s, cond);
+ } else {
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // register offset; the constructors make sure that both shift_imm_
+ // and shift_op_ are initialized.
+ ASSERT(!x.rm_.is(pc));
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ }
+}
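+
+// For example, lea(r0, MemOperand(fp, 8)) assembles to "add r0, fp, #8",
+// while a post-indexed operand such as MemOperand(sp, 4, PostIndex) reduces
+// to "mov r0, sp", since post-indexing leaves the effective address at the
+// unmodified base.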
+
+
+bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
+ uint32_t dummy1;
+ uint32_t dummy2;
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+// Debugging
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
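+ // Growth policy: start at 4KB, double while below 1MB, then grow by 1MB
+ // per step (e.g. 512KB -> 1MB -> 2MB -> 3MB).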
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc-relative pointing outside the code
+ // buffer nor pc-absolute pointing inside the code buffer, so there is no
+ // need to relocate any emitted relocation entries.
+
+ // relocate pending relocation entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ // Adjust code for new modes
+ ASSERT(RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode));
+ // these modes do not need an entry in the constant pool
+ } else {
+ ASSERT(num_prinfo_ < kMaxNumPRInfo);
+ prinfo_[num_prinfo_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
+ if (rinfo.rmode() != RelocInfo::NONE) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Calculate the offset of the next check. It will be overwritten
+ // when a const pool is generated or when const pools are being
+ // blocked for a specific range.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+ // There is nothing to do if there are no pending relocation info entries
+ if (num_prinfo_ == 0) return;
+
+ // We emit a constant pool at regular intervals of about kDistBetweenPools
+ // or when requested by parameter force_emit (e.g. after each function).
+ // We prefer not to emit a jump unless the max distance is reached or if we
+ // are running low on slots, which can happen if a lot of constants are being
+ // emitted (e.g. --debug-code and many static references).
+ int dist = pc_offset() - last_const_pool_end_;
+ if (!force_emit && dist < kMaxDistBetweenPools &&
+ (require_jump || dist < kDistBetweenPools) &&
+ // TODO(1236125): Cleanup the "magic" number below. We know that
+ // the code generation will test every kCheckConstIntervalInst.
+ // Thus we are safe as long as we generate less than 7 constant
+ // entries per instruction.
+ (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+ return;
+ }
+
+ // If we did not return by now, we need to emit the constant pool soon.
+
+ // However, some small sequences of instructions must not be broken up by the
+ // insertion of a constant pool; such sequences are protected by setting
+ // no_const_pool_before_, which is checked here. Also, recursive calls to
+ // CheckConstPool are blocked by no_const_pool_before_.
+ if (pc_offset() < no_const_pool_before_) {
+ // Emission is currently blocked; make sure we try again as soon as possible
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Something is wrong if emission is forced and blocked at the same time
+ ASSERT(!force_emit);
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstrSize : 0;
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool and relocation information (this includes the jump over the pool
+ // and the constant pool marker).
+ int max_needed_space =
+ jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+ while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+ // Block recursive calls to CheckConstPool
+ BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+ num_prinfo_*kInstrSize);
+ // Don't bother to check for the emit calls below.
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Emit jump over constant pool if necessary
+ Label after_pool;
+ if (require_jump) b(&after_pool);
+
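+ // The pool emitted below has the layout
+ //   [b after_pool]    ; only if require_jump
+ //   [0x03000000 | n]  ; marker: undefined instruction encoding n entries
+ //   [const 0] ... [const n-1]
+ // and each pending "ldr/str rX, [pc, #0]" recorded earlier is back-patched
+ // with its real pc-relative offset into the pool.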
+ RecordComment("[ Constant Pool");
+
+ // Put down constant pool marker
+ // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ emit(0x03000000 | num_prinfo_);
+
+ // Emit constant pool entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+ Instr instr = instr_at(rinfo.pc());
+
+ // Instruction to patch must be a ldr/str [pc, #offset]
+ // P and U set, B and W clear, Rn == pc, offset12 still 0
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ (2*B25 | P | U | pc.code()*B16));
+ int delta = pc_ - rinfo.pc() - 8;
+ ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
+ if (delta < 0) {
+ instr &= ~U;
+ delta = -delta;
+ }
+ ASSERT(is_uint12(delta));
+ instr_at_put(rinfo.pc(), instr + delta);
+ emit(rinfo.data());
+ }
+ num_prinfo_ = 0;
+ last_const_pool_end_ = pc_offset();
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm/assembler-thumb2.h b/deps/v8/src/arm/assembler-thumb2.h
new file mode 100644
index 0000000000..31e9487266
--- /dev/null
+++ b/deps/v8/src/arm/assembler-thumb2.h
@@ -0,0 +1,1027 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
+#define V8_ARM_ASSEMBLER_THUMB2_H_
+#include <stdio.h>
+#include "assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Single word VFP register.
+struct SwVfpRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 32; }
+ bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+ // Supports d0 to d15; can later be extended to d31.
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;
+
+
+// Coprocessor register
+struct CRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+enum Coprocessor {
+ p0 = 0,
+ p1 = 1,
+ p2 = 2,
+ p3 = 3,
+ p4 = 4,
+ p5 = 5,
+ p6 = 6,
+ p7 = 7,
+ p8 = 8,
+ p9 = 9,
+ p10 = 10,
+ p11 = 11,
+ p12 = 12,
+ p13 = 13,
+ p14 = 14,
+ p15 = 15
+};
+
+
+// Condition field in instructions
+enum Condition {
+ eq = 0 << 28, // Z set equal.
+ ne = 1 << 28, // Z clear not equal.
+ nz = 1 << 28, // Z clear not zero.
+ cs = 2 << 28, // C set carry set.
+ hs = 2 << 28, // C set unsigned higher or same.
+ cc = 3 << 28, // C clear carry clear.
+ lo = 3 << 28, // C clear unsigned lower.
+ mi = 4 << 28, // N set negative.
+ pl = 5 << 28, // N clear positive or zero.
+ vs = 6 << 28, // V set overflow.
+ vc = 7 << 28, // V clear no overflow.
+ hi = 8 << 28, // C set, Z clear unsigned higher.
+ ls = 9 << 28, // C clear or Z set unsigned lower or same.
+ ge = 10 << 28, // N == V greater or equal.
+ lt = 11 << 28, // N != V less than.
+ gt = 12 << 28, // Z clear, N == V greater than.
+ le = 13 << 28, // Z set or N != V less than or equal.
+ al = 14 << 28 // always.
+};
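+
+// The condition occupies the top nibble of every ARM instruction: e.g. the
+// "addeq" encoding is eq or'ed into the add encoding, and al (0b1110) is the
+// default condition for the assembler functions below.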
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cc;
+ };
+}
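+
+// For example, after cmp(a, b) a branch on lt tests a < b; transposing to
+// cmp(b, a) requires branching on ReverseCondition(lt) == gt for the same
+// test.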
+
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+enum ShiftOp {
+ LSL = 0 << 5,
+ LSR = 1 << 5,
+ ASR = 2 << 5,
+ ROR = 3 << 5,
+ RRX = -1
+};
+
+
+// Condition code updating mode
+enum SBit {
+ SetCC = 1 << 20, // set condition code
+ LeaveCC = 0 << 20 // leave condition code unchanged
+};
+
+
+// Status register selection
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
+};
+
+
+// Status register fields
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode
+enum AddrMode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (without writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+};
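+
+// For example, a stack push is a negative pre-indexed store:
+// str(r, MemOperand(sp, 4, NegPreIndex)) assembles to "str r, [sp, #-4]!".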
+
+
+// Load/store multiple addressing mode
+enum BlockAddrMode {
+ // bit encoding P U W
+ da = (0|0|0) << 21, // decrement after
+ ia = (0|4|0) << 21, // increment after
+ db = (8|0|0) << 21, // decrement before
+ ib = (8|4|0) << 21, // increment before
+ da_w = (0|0|1) << 21, // decrement after with writeback to base
+ ia_w = (0|4|1) << 21, // increment after with writeback to base
+ db_w = (8|0|1) << 21, // decrement before with writeback to base
+ ib_w = (8|4|1) << 21 // increment before with writeback to base
+};
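+
+// For example, stm(db_w, sp, r0.bit() | lr.bit()) assembles to
+// "stmdb sp!, {r0, lr}", the canonical multi-register push.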
+
+
+// Coprocessor load/store operand size
+enum LFlag {
+ Long = 1 << 22, // long load/store coprocessor
+ Short = 0 << 22 // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+ // immediate
+ INLINE(explicit Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // rm <shift_op> shift_imm
+ explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+ // rm <shift_op> rs
+ explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ Register rs_;
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ int32_t imm32_; // valid if rm_ == no_reg
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+};
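+
+// For example, Operand(r1, LSL, 2) denotes the shifter operand "r1, LSL #2",
+// so add(r0, r2, Operand(r1, LSL, 2)) computes r0 = r2 + (r1 << 2).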
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+ // [rn +/- offset] Offset/NegOffset
+ // [rn +/- offset]! PreIndex/NegPreIndex
+ // [rn], +/- offset PostIndex/NegPostIndex
+ // offset is any signed 32-bit value; offset is first loaded to register ip if
+ // it does not fit the addressing mode (12-bit unsigned and sign bit)
+ explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+ // [rn +/- rm] Offset/NegOffset
+ // [rn +/- rm]! PreIndex/NegPreIndex
+ // [rn], +/- rm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+ // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
+ // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
+ // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+ Register rn_; // base
+ Register rm_; // register offset
+ int32_t offset_; // valid if rm_ == no_reg
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ AddrMode am_; // bits P, U, and W
+
+ friend class Assembler;
+};
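+
+// For example, MemOperand(fp, -8) denotes "[fp, #-8]", and
+// MemOperand(r0, r1, LSL, 2) denotes "[r0, r1, LSL #2]".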
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ return (enabled_ & (1u << f)) != 0;
+ }
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(CpuFeature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ private:
+ static unsigned supported_;
+ static unsigned enabled_;
+ static unsigned found_by_runtime_probing_;
+};
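+
+// Typical use, mirroring CompareStub::Generate in codegen-arm.cc:
+//   if (CpuFeatures::IsSupported(VFP3)) {
+//     CpuFeatures::Scope scope(VFP3);
+//     __ vmov(d6, r0, r1);
+//     __ vcmp(d6, d7);
+//   }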
+
+
+typedef int32_t Instr;
+
+
+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+
+
+class Assembler : public Malloced {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still
+ // unbound. Manages the jump elimination optimization if the second
+ // parameter is true.
+ int branch_offset(Label* L, bool jump_elimination_allowed);
+
+ // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ INLINE(static Address target_address_address_at(Address pc));
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc));
+ INLINE(static void set_target_address_at(Address pc, Address target));
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address constant_pool_entry, Address target);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address constant_pool_entry,
+ Address target) {
+ set_target_at(constant_pool_entry, target);
+ }
+
+ // Here we are patching the address in the constant pool, not the actual call
+ // instruction. The address in the constant pool is the same size as a
+ // pointer.
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Distance between the instruction referring to the address of the call
+ // target (ldr pc, [target addr in const pool]) and the return address
+ static const int kCallTargetAddressOffset = kInstrSize;
+
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 8;
+
+ static const int kJSReturnSequenceLength = 4;
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ // Branch instructions
+ void b(int branch_offset, Condition cond = al);
+ void bl(int branch_offset, Condition cond = al);
+ void blx(int branch_offset); // v5 and above
+ void blx(Register target, Condition cond = al); // v5 and above
+ void bx(Register target, Condition cond = al); // v5 and above, plus v4t
+
+ // Convenience branch instructions using labels
+ void b(Label* L, Condition cond = al) {
+ b(branch_offset(L, cond == al), cond);
+ }
+ void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
+ void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
+ void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
+ void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
+
+ // Data-processing instructions
+ void and_(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void eor(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sub(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+ void sub(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ sub(dst, src1, Operand(src2), s, cond);
+ }
+
+ void rsb(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void add(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void adc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sbc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void tst(Register src1, const Operand& src2, Condition cond = al);
+ void tst(Register src1, Register src2, Condition cond = al) {
+ tst(src1, Operand(src2), cond);
+ }
+
+ void teq(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmp(Register src1, const Operand& src2, Condition cond = al);
+ void cmp(Register src1, Register src2, Condition cond = al) {
+ cmp(src1, Operand(src2), cond);
+ }
+
+ void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+ void orr(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+ void orr(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ orr(dst, src1, Operand(src2), s, cond);
+ }
+
+ void mov(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+ void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
+ mov(dst, Operand(src), s, cond);
+ }
+
+ void bic(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mvn(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Multiply instructions
+
+ void mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mul(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Miscellaneous arithmetic instructions
+
+ void clz(Register dst, Register src, Condition cond = al); // v5 and above
+
+ // Status register access instructions
+
+ void mrs(Register dst, SRegister s, Condition cond = al);
+ void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+ // Load/Store instructions
+ void ldr(Register dst, const MemOperand& src, Condition cond = al);
+ void str(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+ void strb(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+ void strh(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+ void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+ // Load/Store multiple instructions
+ void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+ void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+ // Semaphore instructions
+ void swp(Register dst, Register src, Register base, Condition cond = al);
+ void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg);
+
+ void bkpt(uint32_t imm16); // v5 and above
+ void swi(uint32_t imm24, Condition cond = al);
+
+ // Coprocessor instructions
+
+ void cdp(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2, Condition cond = al);
+
+ void cdp2(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2); // v5 and above
+
+ void mcr(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mcr2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void mrc(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mrc2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short, Condition cond = al);
+ void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short); // v5 and above
+ void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short, Condition cond = al);
+ void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short); // v5 and above
+ void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ // Support for VFP.
+ // All these APIs support S0 to S31 and D0 to D15.
+ // They do not currently support the extended D registers (D16 to D31),
+ // though simple modifications would allow it.
+
+ void vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond = al);
+ void vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const SwVfpRegister dst,
+ const Register src,
+ const Condition cond = al);
+ void vmov(const Register dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
+
+ void vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void vmrs(const Register dst,
+ const Condition cond = al);
+
+ // Pseudo instructions
+ void nop() { mov(r0, Operand(r0)); }
+
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
+ }
+
+ void pop() {
+ add(sp, sp, Operand(kPointerSize));
+ }
+
+ // Load effective address of memory operand x into register dst
+ void lea(Register dst, const MemOperand& x,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { b(L, al); }
+
+ // Check the code size generated from label to here.
+ int InstructionsGeneratedSince(Label* l) {
+ return (pc_offset() - l->pos()) / kInstrSize;
+ }
+
+ // Check whether an immediate fits an addressing mode 1 instruction.
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Debugging
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+ void WriteRecordedPositions();
+
+ int pc_offset() const { return pc_ - buffer_; }
+ int current_position() const { return current_position_; }
+ int current_statement_position() const { return current_statement_position_; }
+
+ protected:
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions
+ static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Decode branch instruction at pos and return branch target pos
+ int target_at(int pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos
+ void target_at_put(int pos, int target_pos);
+
+ // Check if is time to emit a constant pool for pending reloc info entries
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Block the emission of the constant pool before pc_offset
+ void BlockConstPoolBefore(int pc_offset) {
+ if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+ }
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes
+ static const int kBufferCheckInterval = 1*KB/2;
+ int next_buffer_check_; // pc offset of next buffer check
+
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+ byte* pc_; // the program counter; moves forward
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+ // Constants in the pool may be addresses of functions that get relocated;
+ // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+
+ // Pools are emitted after function return and in dead code at (more or less)
+ // regular intervals of kDistBetweenPools bytes
+ static const int kDistBetweenPools = 1*KB;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant. We satisfy this constraint by limiting the
+ // distance between pools.
+ static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences
+ int no_const_pool_before_; // block emission before this pc offset
+
+ // Keep track of the last emitted pool to guarantee a maximal distance
+ int last_const_pool_end_; // pc offset following the last constant pool
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+ static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
+ int num_prinfo_; // number of pending reloc info entries in the buffer
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // source position information
+ int current_position_;
+ int current_statement_position_;
+ int written_position_;
+ int written_statement_position_;
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+
+ // Instruction generation
+ void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+ void addrmod2(Instr instr, Register rd, const MemOperand& x);
+ void addrmod3(Instr instr, Register rd, const MemOperand& x);
+ void addrmod4(Instr instr, Register rn, RegList rl);
+ void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+ // Labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+ void next(Label* L);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ friend class RegExpMacroAssemblerARM;
+ friend class RelocInfo;
+ friend class CodePatcher;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_ASSEMBLER_THUMB2_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index ea3df6cfbe..89d974c73d 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1769,9 +1769,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
@@ -1910,9 +1908,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_reg(r0);
- __ mov(r0, Operand(1));
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@@ -3660,9 +3656,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@@ -3670,9 +3664,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@@ -3684,9 +3676,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@@ -3736,9 +3726,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
continue_label.Jump();
smi_label.Bind();
@@ -3760,9 +3748,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
break;
}
@@ -3847,9 +3833,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@@ -4235,9 +4219,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
}
@@ -5079,10 +5061,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ fmdrr(d6, r0, r1);
- __ fmdrr(d7, r2, r3);
+ __ vmov(d6, r0, r1);
+ __ vmov(d7, r2, r3);
- __ fcmp(d6, d7);
+ __ vcmp(d6, d7);
__ vmrs(pc);
__ mov(r0, Operand(0), LeaveCC, eq);
__ mov(r0, Operand(1), LeaveCC, lt);
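A note on the sequence above: vcmp leaves its result in the FPSCR flags, which conditional ARM instructions cannot test directly, so vmrs is used to transfer them. A sketch of the idiom, assuming the surrounding MacroAssembler context (not standalone code):

    __ vcmp(d6, d7);   // double-precision compare; sets N, Z, C, V in the FPSCR
    __ vmrs(pc);       // with pc as destination, copies the FPSCR flags to the CPSR
    __ mov(r0, Operand(0), LeaveCC, eq);  // eq/lt/... now reflect the VFP compare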
@@ -5145,7 +5127,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@@ -5244,7 +5225,6 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Only first argument is a string.
__ bind(&string1);
- __ mov(r0, Operand(2)); // Set number of arguments.
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
// First argument was not a string, test second.
@@ -5256,13 +5236,11 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Only second argument is a string.
__ b(&not_strings);
- __ mov(r0, Operand(2)); // Set number of arguments.
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
__ bind(&not_strings);
}
- __ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
@@ -5353,22 +5331,22 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
- __ fmdrr(d6, r0, r1);
- __ fmdrr(d7, r2, r3);
+ __ vmov(d6, r0, r1);
+ __ vmov(d7, r2, r3);
if (Token::MUL == operation) {
- __ fmuld(d5, d6, d7);
+ __ vmul(d5, d6, d7);
} else if (Token::DIV == operation) {
- __ fdivd(d5, d6, d7);
+ __ vdiv(d5, d6, d7);
} else if (Token::ADD == operation) {
- __ faddd(d5, d6, d7);
+ __ vadd(d5, d6, d7);
} else if (Token::SUB == operation) {
- __ fsubd(d5, d6, d7);
+ __ vsub(d5, d6, d7);
} else {
UNREACHABLE();
}
- __ fmrrd(r0, r1, d5);
+ __ vmov(r0, r1, d5);
__ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
@@ -5457,9 +5435,9 @@ static void GetInt32(MacroAssembler* masm,
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- __ fmdrr(d7, scratch2, scratch);
- __ ftosid(s15, d7);
- __ fmrs(dest, s15);
+ __ vmov(d7, scratch2, scratch);
+ __ vcvt(s15, d7);
+ __ vmov(dest, s15);
} else {
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
@@ -5598,7 +5576,6 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5703,6 +5680,29 @@ static void MultiplyByKnownInt2(
}
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int len = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, len),
+ "GenericBinaryOpStub_%s_%s%s",
+ op_name,
+ overwrite_name,
+               specialized_on_rhs_ ? "_ConstantRhs" : "");
+ return name_;
+}
+
+
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -5980,7 +5980,6 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
// Enter runtime system.
__ bind(&slow);
__ push(r0);
- __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(&not_smi);
@@ -6456,7 +6455,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Slow-case. Tail call builtin.
__ bind(&slow);
- __ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
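The recurring change in this file: VirtualFrame::InvokeBuiltin now takes the argument count directly (see the virtual-frame-arm.cc hunk below), so call sites no longer materialize the count in r0 through a Result. A before/after sketch of one call site, assuming the usual CodeGenerator context (frame_ and __ are the V8 shorthands; not standalone code):

    // Before: the caller loads the count into r0 and tracks the register.
    Result arg_count(r0);
    __ mov(r0, Operand(1));  // argument count, not counting the receiver
    frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);

    // After: the frame derives everything from the arg_count parameter.
    frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);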
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index ba7f93626d..e9f11e9c6e 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -455,13 +455,15 @@ class GenericBinaryOpStub : public CodeStub {
: op_(op),
mode_(mode),
constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+ name_(NULL) { }
private:
Token::Value op_;
OverwriteMode mode_;
int constant_rhs_;
bool specialized_on_rhs_;
+ char* name_;
static const int kMaxKnownRhs = 0x40000000;
@@ -506,22 +508,7 @@ class GenericBinaryOpStub : public CodeStub {
return key;
}
- const char* GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::MOD: return "GenericBinaryOpStub_MOD";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
- }
- }
+ const char* GetName();
#ifdef DEBUG
void Print() {
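GetName moves out of line because the stub name is now formatted once and cached in the new name_ field instead of being picked from a fixed set of literals. A standalone sketch of the same lazy format-and-cache pattern (plain C++ with illustrative values; the real code allocates the buffer through Bootstrapper::AllocateAutoDeletedArray):

    #include <cstdio>

    class StubNameCache {
     public:
      StubNameCache() : name_(nullptr) {}
      ~StubNameCache() { delete[] name_; }

      const char* GetName() {
        if (name_ != nullptr) return name_;  // already formatted on an earlier call
        const int len = 100;
        name_ = new char[len];
        std::snprintf(name_, len, "GenericBinaryOpStub_%s_%s%s",
                      "ADD", "Alloc", "");   // op, overwrite mode, rhs suffix
        return name_;
      }

     private:
      char* name_;  // lazily built and owned by this object
    };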
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 2f9e78f534..afed0fa5c3 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -897,15 +897,14 @@ void Decoder::DecodeUnconditional(Instr* instr) {
// void Decoder::DecodeTypeVFP(Instr* instr)
-// Implements the following VFP instructions:
-// fmsr: Sn = Rt
-// fmrs: Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Decoder::DecodeTypeVFP(Instr* instr) {
@@ -997,8 +996,8 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
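The comment rewrites in this file track the assembler's switch from the old F* mnemonics to the ARM UAL spellings. The full mapping, as it appears across the hunks in this change (shown as equivalent MacroAssembler calls, for reference):

    __ fmsr(s15, r7);       // core -> single        now: __ vmov(s15, r7);
    __ fmrs(r0, s15);       // single -> core        now: __ vmov(r0, s15);
    __ fmdrr(d7, r2, r3);   // core pair -> double   now: __ vmov(d7, r2, r3);
    __ fmrrd(r0, r1, d5);   // double -> core pair   now: __ vmov(r0, r1, d5);
    __ fsitod(d7, s15);     // int -> double         now: __ vcvt(d7, s15);
    __ ftosid(s15, d7);     // double -> int         now: __ vcvt(s15, d7);
    __ faddd(d5, d6, d7);   // also fsubd/fmuld/fdivd now: __ vadd/vsub/vmul/vdiv
    __ fcmp(d6, d7);        // double compare        now: __ vcmp(d6, d7);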
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index ab636b6b88..55d87b7c2d 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -414,78 +414,98 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
- ASSERT(slot != NULL); // No global declarations here.
-
- // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
- switch (slot->type()) {
- case Slot::LOOKUP: {
- __ mov(r2, Operand(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
- PropertyAttributes attr = decl->mode() == Variable::VAR ?
- NONE : READ_ONLY;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ mov(r0, Operand(Factory::the_hole_value()));
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- } else if (decl->fun() != NULL) {
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
- Visit(decl->fun()); // Initial value for function decl.
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ mov(r0, Operand(Factory::the_hole_value()));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(r0);
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- }
- break;
- case Slot::CONTEXT:
- // The variable in the decl always resides in the current context.
- ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
- if (decl->mode() == Variable::CONST) {
- __ mov(r0, Operand(Factory::the_hole_value()));
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(ip);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
- __ ldr(r1, CodeGenerator::ContextOperand(cp,
- Context::FCONTEXT_INDEX));
+ __ ldr(r1,
+ CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
}
- __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
- // No write barrier since the_hole_value is in old space.
- ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(r0);
+ __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ RecordWrite(cp, r2, r0);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ Visit(decl->fun()); // Initial value for function decl.
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
Visit(decl->fun());
__ pop(r0);
- if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ ldr(r1, CodeGenerator::ContextOperand(cp,
- Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
- }
- __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
- // We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, r0);
+ } else {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
}
- break;
- default:
- UNREACHABLE();
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Value in r0 is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ }
}
}
@@ -501,21 +521,6 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- Expression* expr = stmt->expression();
- // Complete the statement based on the type of the subexpression.
- if (expr->AsLiteral() != NULL) {
- __ mov(r0, Operand(expr->AsLiteral()->handle()));
- } else {
- ASSERT_EQ(Expression::kValue, expr->context());
- Visit(expr);
- __ pop(r0);
- }
- EmitReturnSequence(stmt->statement_pos());
-}
-
-
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -536,18 +541,24 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- Expression* rewrite = expr->var()->rewrite();
+ EmitVariableLoad(expr->var(), expr->context());
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ Expression* rewrite = var->rewrite();
if (rewrite == NULL) {
- ASSERT(expr->var()->is_global());
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
- __ mov(r2, Operand(expr->name()));
+ __ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndMove(expr->context(), r0);
+ DropAndMove(context, r0);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@@ -568,7 +579,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
UNREACHABLE();
}
}
- Move(expr->context(), slot, r0);
+ Move(context, slot, r0);
} else {
// A variable has been rewritten into an explicit access to
// an object property.
@@ -603,7 +614,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(expr->context(), r0, 2);
+ DropAndMove(context, r0, 2);
}
}
@@ -637,32 +648,15 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Label boilerplate_exists;
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // r2 = literal array (0).
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r2, literal_offset));
- // Check whether we need to materialize the object literal boilerplate.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, Operand(ip));
- __ b(ne, &boilerplate_exists);
- // Create boilerplate if it does not exist.
- // r1 = literal index (1).
__ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
- // r0 = constant properties (2).
__ mov(r0, Operand(expr->constant_properties()));
__ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ bind(&boilerplate_exists);
- // r0 contains boilerplate.
- // Clone boilerplate.
- __ push(r0);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
// If result_saved == true: The result is saved on top of the
@@ -763,32 +757,15 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Label make_clone;
-
- // Fetch the function's literals array.
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- // Check if the literal's boilerplate has been instantiated.
- int offset =
- FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
- __ ldr(r0, FieldMemOperand(r3, offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(&make_clone, ne);
-
- // Instantiate the boilerplate.
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->literals()));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
- __ bind(&make_clone);
- // Clone the boilerplate.
- __ push(r0);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
bool result_saved = false; // Is the result saved to the stack?
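Both literal visitors shrink the same way: instead of materializing a boilerplate object, caching it in the literals array, and cloning it, the code now pushes three arguments and makes a single runtime call. The shape of the new sequence for a shallow array literal, assuming the surrounding FastCodeGenerator context (not standalone code):

    __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
    __ mov(r1, Operand(expr->literals()));
    __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());  // literals array, index, constants
    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);  // result object in r0
    // Nested literals (depth > 1) go through Runtime::kCreateArrayLiteral instead.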
@@ -860,10 +837,38 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
+ Expression::Context context) {
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ Move(context, r0);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ Move(context, r0);
+}
+
+
+void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
+ Expression::Context context) {
+ __ pop(r0);
+ __ pop(r1);
+  GenericBinaryOpStub stub(op, NO_OVERWRITE);
+ __ CallStub(&stub);
+ Move(context, r0);
+}
+
+
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
-
+ ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
@@ -976,35 +981,6 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
- } else {
- Property* property = var->rewrite()->AsProperty();
- ASSERT_NOT_NULL(property);
-
- // Load object and key onto the stack.
- Slot* object_slot = property->obj()->AsSlot();
- ASSERT_NOT_NULL(object_slot);
- Move(Expression::kValue, object_slot, r0);
-
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- Move(Expression::kValue, key_literal);
-
- // Value to store was pushed before object and key on the stack.
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-
- // Arguments to ic is value in r0, object and key on stack.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- if (expr->context() == Expression::kEffect) {
- __ add(sp, sp, Operand(3 * kPointerSize));
- } else if (expr->context() == Expression::kValue) {
- // Value is still on the stack in esp[2 * kPointerSize]
- __ add(sp, sp, Operand(2 * kPointerSize));
- } else {
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
- DropAndMove(expr->context(), r0, 3);
- }
}
}
@@ -1104,7 +1080,9 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
DropAndMove(expr->context(), r0);
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1117,7 +1095,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ Call(ic, reloc_info);
+ __ Call(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -1157,7 +1135,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Push global object as receiver for the call IC lookup.
__ ldr(r0, CodeGenerator::GlobalObject());
__ stm(db_w, sp, r1.bit() | r0.bit());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1171,7 +1149,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
__ mov(r0, Operand(key->handle()));
__ push(r0);
Visit(prop->obj());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1706,7 +1684,63 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-#undef __
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), r0);
+}
+
+
+Register FastCodeGenerator::result_register() { return r0; }
+
+
+Register FastCodeGenerator::context_register() { return cp; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ str(value, MemOperand(fp, frame_offset));
+}
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FastCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+  // Cook the return address in the link register onto the stack (smi-encoded Code* delta)
+ __ sub(r1, lr, Operand(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ push(r1);
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Restore result register from stack.
+ __ pop(r1);
+ // Uncook return address and return.
+ __ pop(result_register());
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ add(pc, r1, Operand(masm_->CodeObject()));
+}
+
+
+void FastCodeGenerator::ThrowException() {
+ __ push(result_register());
+ __ CallRuntime(Runtime::kThrow, 1);
+}
+
+
+#undef __
} } // namespace v8::internal
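The cook/uncook pair above stores the return address as a smi-tagged offset from the Code object rather than as a raw pointer, so the slot on the stack stays valid even if the collector moves the code. A standalone sketch of the arithmetic with made-up addresses (the real code does this in r1):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t code_object = 0x10000;  // hypothetical Code* address
      uintptr_t lr          = 0x10428;  // hypothetical return address inside the code

      // EnterFinallyBlock: delta from the code object, then smi-tag (r1 + r1 == << 1).
      uintptr_t cooked = (lr - code_object) << 1;

      // ExitFinallyBlock: un-smi-tag (ASR 1), then rebase on the code object.
      uintptr_t uncooked = (cooked >> 1) + code_object;

      assert(uncooked == lr);  // the address round-trips exactly
      return 0;
    }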
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index b0fa13a5a1..0cb7f12302 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -28,7 +28,11 @@
#include "v8.h"
#include "frames-inl.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2-inl.h"
+#else
#include "arm/assembler-arm-inl.h"
+#endif
namespace v8 {
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index c56f414a14..b57aa93967 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -276,7 +276,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
@@ -371,13 +371,11 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
-void CallIC::Generate(MacroAssembler* masm,
- int argc,
- const ExternalReference& f) {
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -394,7 +392,7 @@ void CallIC::Generate(MacroAssembler* masm,
// Call the entry.
__ mov(r0, Operand(2));
- __ mov(r1, Operand(f));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
CEntryStub stub(1);
__ CallStub(&stub);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index aa6570ce11..876eec109c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -162,6 +162,21 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
}
+void MacroAssembler::Drop(int stack_elements, Condition cond) {
+ if (stack_elements > 0) {
+ add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
+ }
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ bl(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+ mov(dst, Operand(value));
+}
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
@@ -628,6 +643,15 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
+void MacroAssembler::PopTryHandler() {
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ pop(r1);
+ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ str(r1, MemOperand(ip));
+}
+
+
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@@ -994,9 +1018,9 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
Register outLowReg) {
// ARMv7 VFP3 instructions to implement integer to double conversion.
mov(r7, Operand(inReg, ASR, kSmiTagSize));
- fmsr(s15, r7);
- fsitod(d7, s15);
- fmrrd(outLowReg, outHighReg, d7);
+ vmov(s15, r7);
+ vcvt(d7, s15);
+ vmov(outLowReg, outHighReg, d7);
}
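Of the helpers added to this file, Drop is the one that replaces hand-written sp adjustments at many call sites. A usage sketch, assuming the MacroAssembler context (not standalone code):

    __ Drop(2);      // discard two stack slots: add sp, sp, #(2 * kPointerSize)
    __ Drop(3, eq);  // conditional form: only adjusts sp when eq holds
    __ Drop(0);      // no-op, thanks to the stack_elements > 0 guard above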
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 09743290f6..88bfa9ce0a 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -64,6 +64,9 @@ class MacroAssembler: public Assembler {
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
+ void Drop(int stack_elements, Condition cond = al);
+ void Call(Label* target);
+ void Move(Register dst, Handle<Object> value);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
@@ -148,6 +151,9 @@ class MacroAssembler: public Assembler {
// On exit, r0 contains TOS (code slot).
void PushTryHandler(CodeLocation try_location, HandlerType type);
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 9dc417bb71..f3927720fb 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1893,14 +1893,14 @@ void Simulator::DecodeUnconditional(Instr* instr) {
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFP instructions are currently supported.
-// fmsr :Sn = Rt
-// fmrs :Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov :Sn = Rt
+// vmov :Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Simulator::DecodeTypeVFP(Instr* instr) {
@@ -2020,8 +2020,8 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
// Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index efccaf4960..958842d2c8 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -446,7 +446,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -454,7 +454,8 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss) {
+ Label* miss,
+ Failure** failure) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@@ -476,6 +477,8 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
+
+ return true;
}
@@ -774,8 +777,26 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, &miss);
+ if (Heap::InNewSpace(function)) {
+ // We can't embed a pointer to a function in new space so we have
+ // to verify that the shared function info is unchanged. This has
+ // the nice side effect that multiple closures based on the same
+ // function can all use this call IC. Before we load through the
+ // function, we have to verify that it still is a function.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ b(ne, &miss);
+
+ // Check the shared function info. Make sure it hasn't changed.
+ __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ cmp(r2, r3);
+ __ b(ne, &miss);
+ } else {
+ __ cmp(r1, Operand(Handle<JSFunction>(function)));
+ __ b(ne, &miss);
+ }
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1003,10 +1024,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
- AccessorInfo* callback,
- String* name) {
+ AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1015,7 +1036,11 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
Label miss;
__ ldr(r0, MemOperand(sp, 0));
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1168,7 +1193,11 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
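GenerateLoadCallback's new bool/Failure** signature lets a stub compiler bail out with a concrete failure object instead of pressing on after a failed step; both call sites above follow the same pattern (V8-internal API, mirroring the hunks):

    Failure* failure = Failure::InternalError();
    bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
                                        callback, name, &miss, &failure);
    if (!success) return failure;  // propagate the failure to the caller

The CompileCallGlobal change is independent: a stub must not embed a direct pointer to a JSFunction in new space, since the scavenger may move it, so it instead verifies that the callee is a JSFunction with the expected SharedFunctionInfo. As the hunk's comment notes, this also lets multiple closures over the same function share the call IC.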
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 47ecb96360..132c8aebc1 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -243,11 +243,8 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- Result* arg_count_register,
int arg_count) {
- ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
- arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 457478da92..d5230007a4 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -305,7 +305,6 @@ class VirtualFrame : public ZoneObject {
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
- Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes