author     Andras Becsi <andras.becsi@digia.com>             2014-03-18 13:16:26 +0100
committer  Frederik Gladhorn <frederik.gladhorn@digia.com>   2014-03-20 15:55:39 +0100
commit     3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch)
tree       92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/v8/src/ia32
parent     e90d7c4b152c56919d963987e2503f9909a666d2 (diff)
download   qtwebengine-chromium-3f0f86b0caed75241fa71c95a5d73bc0164348c5.tar.gz
Update to new stable branch 1750
This also includes an updated ninja and chromium dependencies needed on Windows.

Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42
Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu>
Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/v8/src/ia32')
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32-inl.h          33
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.cc             193
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.h              85
-rw-r--r--  chromium/v8/src/ia32/builtins-ia32.cc              113
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.cc            1901
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.h             31
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.cc               61
-rw-r--r--  chromium/v8/src/ia32/deoptimizer-ia32.cc           102
-rw-r--r--  chromium/v8/src/ia32/disasm-ia32.cc                52
-rw-r--r--  chromium/v8/src/ia32/full-codegen-ia32.cc          386
-rw-r--r--  chromium/v8/src/ia32/ic-ia32.cc                    68
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.cc       1032
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.h        87
-rw-r--r--  chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc  20
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.cc               404
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.h                199
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.cc       456
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.h        95
-rw-r--r--  chromium/v8/src/ia32/simulator-ia32.cc             1
-rw-r--r--  chromium/v8/src/ia32/stub-cache-ia32.cc            1176
20 files changed, 2605 insertions, 3890 deletions
diff --git a/chromium/v8/src/ia32/assembler-ia32-inl.h b/chromium/v8/src/ia32/assembler-ia32-inl.h
index 5a35b207f72..ee5d991e38a 100644
--- a/chromium/v8/src/ia32/assembler-ia32-inl.h
+++ b/chromium/v8/src/ia32/assembler-ia32-inl.h
@@ -47,6 +47,7 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
@@ -124,12 +125,6 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return &Memory::Object_at(pc_);
-}
-
-
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
@@ -144,9 +139,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ return Memory::Address_at(pc_);
}
@@ -190,6 +185,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
@@ -241,6 +243,18 @@ Object** RelocInfo::call_object_address() {
}
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
return *pc_ == kCallOpcode;
}
@@ -379,7 +393,8 @@ void Assembler::emit(Handle<Object> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)) {
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
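The assembler-ia32-inl.h changes above introduce kNoCodeAgeSequenceLength = 5 and read the code-age stub handle from pc_ + 1 after checking for kCallOpcode (0xE8). The length follows directly from the ia32 call encoding: one opcode byte plus a 32-bit operand. A minimal standalone sketch of reading such a sequence (illustrative only, not V8 code; names are hypothetical):

#include <cstdint>
#include <cstring>

// A 5-byte ia32 call sequence: the 0xE8 opcode followed by a 32-bit operand,
// which is why kNoCodeAgeSequenceLength is 5 and the stub handle is read
// from pc + 1 in the code above.
struct CallSequence {
  uint8_t opcode;    // expected to be 0xE8
  uint32_t operand;  // rel32 displacement, or a patched-in pointer value
};

bool ReadCallSequence(const uint8_t* pc, CallSequence* out) {
  if (pc[0] != 0xE8) return false;        // not a call; bail out
  out->opcode = pc[0];
  std::memcpy(&out->operand, pc + 1, 4);  // operand starts at pc + 1
  return true;                            // 1 + 4 = 5 bytes consumed
}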
diff --git a/chromium/v8/src/ia32/assembler-ia32.cc b/chromium/v8/src/ia32/assembler-ia32.cc
index e5456da4746..733432028af 100644
--- a/chromium/v8/src/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/ia32/assembler-ia32.cc
@@ -53,6 +53,7 @@ bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
+uint64_t CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
@@ -88,8 +89,6 @@ const char* IntelDoubleRegister::AllocationIndexToString(int index) {
}
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@@ -552,6 +551,16 @@ void Assembler::mov_w(const Operand& dst, Register src) {
}
+void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ EMIT(static_cast<int8_t>(imm16 & 0xff));
+ EMIT(static_cast<int8_t>(imm16 >> 8));
+}
+
+
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
@@ -1131,30 +1140,21 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
} else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
}
+ emit(imm);
}
@@ -1178,6 +1178,9 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
test(op.reg(), imm);
return;
}
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1185,9 +1188,26 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
return;
}
EnsureSpace ensure_space(this);
@@ -1402,7 +1422,8 @@ void Assembler::call(Handle<Code> code,
TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
emit(code, rmode, ast_id);
}
@@ -2046,7 +2067,26 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x57);
@@ -2054,39 +2094,68 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
}
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+void Assembler::addps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- EMIT(0xF2);
EMIT(0x0F);
- EMIT(0x51);
+ EMIT(0x58);
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+void Assembler::subps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- EMIT(0x66);
EMIT(0x0F);
- EMIT(0x54);
+ EMIT(0x5C);
emit_sse_operand(dst, src);
}
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
- EMIT(0x56);
+ EMIT(0x54);
emit_sse_operand(dst, src);
}
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
- EMIT(0x2E);
+ EMIT(0x56);
emit_sse_operand(dst, src);
}
@@ -2163,6 +2232,17 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(IsEnabled(SSE2));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC6);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2235,18 +2315,6 @@ void Assembler::prefetch(const Operand& src, int level) {
}
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2267,16 +2335,6 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
}
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::movss(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2297,16 +2355,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
}
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::movd(XMMRegister dst, const Operand& src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2335,7 +2383,7 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
EMIT(imm8);
}
@@ -2474,6 +2522,11 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ EMIT(0xC0 | (dst.code() << 3) | src.code());
+}
+
+
void Assembler::Print() {
Disassembler::Decode(isolate(), stdout, buffer_, pc_);
}
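The test()/test_b() rework in assembler-ia32.cc above routes small immediates to the shortest encoding the operands allow: A8 ib for eax, F6 /0 ib for the other byte registers, and the full A9 id or F7 /0 id otherwise, since test has no sign-extended 8-bit immediate form. A self-contained sketch of the same encoding choice, in plain C++ rather than the V8 Assembler (register codes follow the usual ia32 numbering):

#include <cstdint>
#include <vector>

enum Reg { EAX = 0, ECX = 1, EDX = 2, EBX = 3, ESP = 4, EBP = 5, ESI = 6, EDI = 7 };

// Emit "test reg, imm", preferring the 8-bit immediate forms when the value
// fits in a byte and the register has a byte variant (eax/ecx/edx/ebx).
void EmitTestRegImm(std::vector<uint8_t>* code, Reg reg, uint32_t imm) {
  if (imm <= 0xFF) {
    if (reg == EAX) {
      code->push_back(0xA8);                             // test al, imm8
      code->push_back(static_cast<uint8_t>(imm));
      return;
    }
    if (reg == ECX || reg == EDX || reg == EBX) {
      code->push_back(0xF6);                             // test r/m8, imm8
      code->push_back(static_cast<uint8_t>(0xC0 | reg)); // ModRM: register-direct, /0
      code->push_back(static_cast<uint8_t>(imm));
      return;
    }
  }
  // 32-bit forms: test does not sign-extend 8-bit immediates.
  if (reg == EAX) {
    code->push_back(0xA9);                               // test eax, imm32
  } else {
    code->push_back(0xF7);                               // test r/m32, imm32
    code->push_back(static_cast<uint8_t>(0xC0 | reg));   // ModRM: register-direct, /0
  }
  for (int i = 0; i < 4; ++i) {
    code->push_back(static_cast<uint8_t>(imm >> (8 * i)));  // little-endian imm32
  }
}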
diff --git a/chromium/v8/src/ia32/assembler-ia32.h b/chromium/v8/src/ia32/assembler-ia32.h
index 55eff931907..6ed0bc6d662 100644
--- a/chromium/v8/src/ia32/assembler-ia32.h
+++ b/chromium/v8/src/ia32/assembler-ia32.h
@@ -535,32 +535,54 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
+ if (Check(f, cross_compile_)) return true;
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -616,13 +638,6 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
@@ -713,6 +728,7 @@ class Assembler : public AssemblerBase {
void mov_w(Register dst, const Operand& src);
void mov_w(const Operand& dst, Register src);
+ void mov_w(const Operand& dst, int16_t imm16);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
@@ -852,7 +868,7 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
+ void test_b(Register reg, uint8_t imm8);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
@@ -995,8 +1011,31 @@ class Assembler : public AssemblerBase {
void cpuid();
+ // SSE instructions
+ void movaps(XMMRegister dst, XMMRegister src);
+ void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
+ void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
+ void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
+ void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
+
+ void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
+ void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
+ void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
+ void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
+
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src) {
+ cvttss2si(dst, Operand(src));
+ }
void cvttsd2si(Register dst, const Operand& src);
void cvtsd2si(Register dst, XMMRegister src);
@@ -1012,13 +1051,12 @@ class Assembler : public AssemblerBase {
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
@@ -1036,8 +1074,6 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
-
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
@@ -1050,19 +1086,18 @@ class Assembler : public AssemblerBase {
}
}
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
- void movss(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
@@ -1136,16 +1171,14 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- byte byte_at(int pos) { return buffer_[pos]; }
+ byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst, Register src);
byte* addr_at(int pos) { return buffer_ + pos; }
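The CpuFeatures changes in this header reduce every query to a bit test against a 64-bit mask (Check/flag2set), with cross_compile_ acting as an override set for cross-compilation targets. A minimal sketch of the same pattern with a stand-in feature enum (illustrative only, not the V8 declarations):

#include <cstdint>

enum Feature { SSE2 = 0, SSE3 = 1, SSE4_1 = 2, CMOV = 3 };  // hypothetical list

// Each feature owns one bit in a uint64_t set (flag2set above).
inline uint64_t FlagToSet(Feature f) { return static_cast<uint64_t>(1) << f; }

// A feature is in a set iff its bit is set (Check above).
inline bool Check(Feature f, uint64_t set) { return (set & FlagToSet(f)) != 0; }

// Supported = forced on by the cross-compile mask, or found by probing.
inline bool IsSupported(Feature f, uint64_t probed, uint64_t cross_compile) {
  if (Check(f, cross_compile)) return true;  // trust the requested target
  return Check(f, probed);
}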
diff --git a/chromium/v8/src/ia32/builtins-ia32.cc b/chromium/v8/src/ia32/builtins-ia32.cc
index a1597481aa6..5a3fa78e339 100644
--- a/chromium/v8/src/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/ia32/builtins-ia32.cc
@@ -539,10 +539,12 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ popad();
__ ret(0);
@@ -561,7 +563,46 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -570,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -580,6 +621,21 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ } else {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ }
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -628,25 +684,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
- // TODO(mvstanton): We should save these regs, do this in a future
- // checkin.
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
@@ -1063,13 +1100,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@@ -1326,6 +1361,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
+
#undef __
}
} // namespace v8::internal
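GenerateMakeCodeYoungAgainCommon and Generate_MarkCodeAsExecutedOnce above grow the C helper call from one argument to two, passing the isolate alongside the code address; with PrepareCallCFunction(2, ...) the two ia32 cdecl arguments simply occupy the first two stack slots. A sketch of that layout from the caller's side (illustrative only; fixed 4-byte pointers as on ia32):

#include <cstdint>
#include <cstring>

// Mirrors the two movs above: arg0 at esp + 0, arg1 at esp + 1 * kPointerSize.
void LayOutTwoCdeclArgs(uint8_t* stack_top, uint32_t arg0, uint32_t arg1) {
  std::memcpy(stack_top + 0, &arg0, 4);  // Operand(esp, 0)                <- code address (eax)
  std::memcpy(stack_top + 4, &arg1, 4);  // Operand(esp, 1 * kPointerSize) <- isolate address
}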
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.cc b/chromium/v8/src/ia32/code-stubs-ia32.cc
index a83c1ae91d1..04818149202 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.cc
+++ b/chromium/v8/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -71,7 +82,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -82,7 +93,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -107,6 +118,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -127,6 +149,19 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -157,15 +192,21 @@ static void InitializeArrayConstructorDescriptor(
// eax -- number of arguments
// edi -- function
// ebx -- type info cell with elements kind
- static Register registers[] = { edi, ebx };
- descriptor->register_param_count_ = 2;
+ static Register registers_variable_args[] = { edi, ebx, eax };
+ static Register registers_no_args[] = { edi, ebx };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -179,15 +220,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// eax -- number of arguments
// edi -- constructor function
- static Register registers[] = { edi };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { edi, eax };
+ static Register registers_no_args[] = { edi };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -283,6 +330,29 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void NewStringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -432,7 +502,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ __ movsd(Operand(esp, i * kDoubleSize), reg);
}
}
const int argument_count = 1;
@@ -448,7 +518,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ __ movsd(reg, Operand(esp, i * kDoubleSize));
}
__ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
@@ -470,18 +540,6 @@ class FloatingPointHelper : public AllStatic {
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -489,32 +547,11 @@ class FloatingPointHelper : public AllStatic {
Label* non_float,
Register scratch);
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
};
@@ -658,1259 +695,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats, not_int32, right_arg_changed;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ebx, ecx, xmm2);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm1, edi, ecx, xmm2);
- if (op_ == Token::MOD) {
- if (encoded_right_arg_.has_value) {
- __ cmp(edi, Immediate(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -2034,7 +818,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2049,7 +833,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
@@ -2062,17 +846,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
+ __ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
@@ -2098,13 +882,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
@@ -2221,79 +1005,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- __ TruncateHeapNumberToI(edx, edx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- __ TruncateHeapNumberToI(ecx, eax);
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2320,7 +1031,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -2329,109 +1040,20 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ Cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
}
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -2470,7 +1092,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
+ __ Cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -2485,12 +1107,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
+ __ Cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2501,7 +1123,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2509,7 +1131,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
@@ -2604,9 +1226,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
+ __ movsd(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
+ __ movsd(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -2629,7 +1251,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ test_b(eax, 0x5F); // We check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
@@ -2683,7 +1305,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// exponent is a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
+ __ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -2696,7 +1318,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
@@ -2704,8 +1326,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
@@ -2713,7 +1335,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
@@ -2756,8 +1378,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
}
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3495,7 +2116,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -3768,106 +2389,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -4205,6 +2726,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // eax : number of arguments to the construct function
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
@@ -4224,9 +2746,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ j(not_equal, &miss);
@@ -4265,6 +2786,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
@@ -4430,20 +2952,19 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ } else {
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ }
}
@@ -4456,7 +2977,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
save_doubles_code = *(save_doubles.GetCode(isolate));
}
- save_doubles_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
}
@@ -4464,8 +2984,7 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -4508,6 +3027,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// stack alignment is known to be correct. This function takes one argument
// which is passed on the stack, and we know that the stack has been
// prepared to pass at least one argument.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@@ -5455,33 +3976,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ ret(0);
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ ret(0);
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5517,12 +4016,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
__ bind(&done);
@@ -6253,24 +4747,24 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
+ __ Cvtsi2sd(xmm1, ecx);
__ bind(&left);
__ JumpIfSmi(edx, &left_smi, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
+ __ Cvtsi2sd(xmm0, ecx);
__ bind(&done);
// Compare operands.
@@ -6784,90 +5278,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore
- { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
- { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub and StringAddStub::Generate
- { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
- // StringAddStub::Generate
- { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub(kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate);
}
}
@@ -7180,12 +5597,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ mov(edi, eax);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ mov(eax, MemOperand(ebp, parameter_count_offset));
+ // The parameter count above includes the receiver for the arguments passed to
+ // the deoptimization handler. Subtract the receiver for the parameter count
+ // for the call.
+ __ sub(eax, Immediate(1));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ParameterCount argument_count(eax);
+ __ InvokeFunction(
+ edi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- // It's always safe to call the entry hook stub, as the hook itself
- // is not allowed to call back to V8.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -7300,17 +5731,18 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ inc(edx);
__ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ Assert(equal, kExpectedAllocationSiteInCell);
}
- // Save the resulting elements kind in type info
- __ SmiTag(edx);
- __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
- __ SmiUntag(edx);
+ // Save the resulting elements kind in type info. We can't just store edx
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field...upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ add(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset),
+ Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -7343,12 +5775,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
}
}
}
@@ -7370,11 +5802,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -7447,12 +5879,15 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
- masm->isolate()->heap()->allocation_site_map())));
+ __ cmp(FieldOperand(edx, 0), Immediate(
+ masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
+ // Only look at the lower 16 bits of the transition info.
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.h b/chromium/v8/src/ia32/code-stubs-ia32.h
index 5c8eca37b5b..14259241c85 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.h
+++ b/chromium/v8/src/ia32/code-stubs-ia32.h
@@ -74,7 +74,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -217,30 +216,6 @@ class StringCompareStub: public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -327,8 +302,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -468,7 +441,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
@@ -480,7 +453,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
diff --git a/chromium/v8/src/ia32/codegen-ia32.cc b/chromium/v8/src/ia32/codegen-ia32.cc
index 84a4d238bd4..ab4029da119 100644
--- a/chromium/v8/src/ia32/codegen-ia32.cc
+++ b/chromium/v8/src/ia32/codegen-ia32.cc
@@ -117,7 +117,7 @@ UnaryMathFunction CreateExpFunction() {
CpuFeatureScope use_sse2(&masm, SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ movsd(input, Operand(esp, 1 * kPointerSize));
__ push(eax);
__ push(ebx);
@@ -125,7 +125,7 @@ UnaryMathFunction CreateExpFunction() {
__ pop(ebx);
__ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ movsd(Operand(esp, 1 * kPointerSize), result);
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
@@ -155,9 +155,9 @@ UnaryMathFunction CreateSqrtFunction() {
// Move double input into registers.
{
CpuFeatureScope use_sse2(&masm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
@@ -462,10 +462,10 @@ OS::MemMoveFunction CreateMemMoveFunction() {
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
- __ movdbl(xmm0, Operand(src, 0));
- __ movdbl(xmm1, Operand(src, count, times_1, -8));
- __ movdbl(Operand(dst, 0), xmm0);
- __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
@@ -666,8 +666,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
}
// Set transitioned map.
@@ -694,8 +693,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -743,7 +741,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
XMMRegister the_hole_nan = xmm1;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(the_hole_nan,
+ __ movsd(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
@@ -768,8 +766,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
@@ -789,7 +787,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -833,8 +831,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -899,9 +896,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0,
+ __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -1081,20 +1078,20 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
- __ movdbl(double_scratch, ExpConstant(0));
+ __ movsd(double_scratch, ExpConstant(0));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
__ j(above_equal, &done);
__ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
+ __ movsd(result, ExpConstant(2));
__ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
+ __ movsd(double_scratch, ExpConstant(3));
+ __ movsd(result, ExpConstant(4));
__ mulsd(double_scratch, input);
__ addsd(double_scratch, result);
__ movd(temp2, double_scratch);
__ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
+ __ movsd(result, ExpConstant(6));
__ mulsd(double_scratch, ExpConstant(5));
__ subsd(double_scratch, input);
__ subsd(result, double_scratch);
@@ -1111,16 +1108,15 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ shl(temp1, 20);
__ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
+ __ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
+ __ orps(input, double_scratch);
__ mulsd(result, input);
__ bind(&done);
}
#undef __
-static const int kNoCodeAgeSequenceLength = 5;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
@@ -1153,7 +1149,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
@@ -1165,16 +1161,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
diff --git a/chromium/v8/src/ia32/deoptimizer-ia32.cc b/chromium/v8/src/ia32/deoptimizer-ia32.cc
index 13a70afe521..5300dde9a21 100644
--- a/chromium/v8/src/ia32/deoptimizer-ia32.cc
+++ b/chromium/v8/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// sub <profiling_counter>, <delta>
-// jns ok
-// call <interrupt stub>
-// ok:
-//
-// The patched back edge looks like this:
-//
-// sub <profiling_counter>, <delta> ;; Not changed
-// nop
-// nop
-// call <on-stack replacement>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -283,16 +202,14 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(eax.code(), params);
output_frame->SetRegister(ebx.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
@@ -314,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+ Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+ return isolate_->builtins()->builtin(name);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -330,7 +254,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ __ movsd(Operand(esp, offset), xmm_reg);
}
}
@@ -382,8 +306,8 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
}
}
@@ -468,7 +392,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
}
diff --git a/chromium/v8/src/ia32/disasm-ia32.cc b/chromium/v8/src/ia32/disasm-ia32.cc
index 01fa9996456..057a558e28f 100644
--- a/chromium/v8/src/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/ia32/disasm-ia32.cc
@@ -942,13 +942,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
@@ -1042,14 +1042,30 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (f0byte == 0x57) {
+ } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
+
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ AppendToBuffer("%s %s,",
+ pseudo_op[f0byte - 0x53],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
data += 2;
int mod, regop, rm;
@@ -1058,6 +1074,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+  } else if (f0byte == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufps %s,%s,%d",
+ NameOfXMMRegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
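
The hunk above replaces the lone xorps case with a table covering the whole 0F 53..0F 5F packed-single group. A standalone sketch of the same lookup, reusing the opcode order from the diff (prints "xorps" for 0F 57):

    #include <cstdint>
    #include <cstdio>

    // Mnemonics for the second opcode byte 0x53..0x5F, same order as above.
    static const char* const kPackedOps[] = {
      "rcpps", "andps", "andnps", "orps", "xorps", "addps", "mulps",
      "cvtps2pd", "cvtdq2ps", "subps", "minps", "divps", "maxps",
    };

    const char* PackedOpMnemonic(uint8_t opcode) {
      return (opcode >= 0x53 && opcode <= 0x5F) ? kPackedOps[opcode - 0x53] : nullptr;
    }

    int main() { std::printf("%s\n", PackedOpMnemonic(0x57)); }
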
@@ -1189,6 +1216,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov_w ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0xC7) {
+ data++;
+ AppendToBuffer("%s ", "mov_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
} else if (*data == 0x0F) {
data++;
if (*data == 0x38) {
@@ -1239,8 +1273,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
diff --git a/chromium/v8/src/ia32/full-codegen-ia32.cc b/chromium/v8/src/ia32/full-codegen-ia32.cc
index 6d39cc1e6e5..3c5d4aa2788 100644
--- a/chromium/v8/src/ia32/full-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/full-codegen-ia32.cc
@@ -158,10 +158,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1117,7 +1114,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
- __ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ Set(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
@@ -1577,6 +1574,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1586,21 +1585,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -1711,6 +1704,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1722,6 +1720,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
@@ -1734,35 +1740,27 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1) {
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2247,7 +2245,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2332,7 +2330,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
@@ -3062,6 +3060,32 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x80000000));
+ __ j(not_equal, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x00000000));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
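
EmitIsMinusZero above recognises -0.0 purely from the heap number's two words: exponent word 0x80000000, mantissa word 0. The same bit test on an unboxed double, assuming little-endian IEEE-754 layout as on ia32:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      // High word = sign/exponent (HeapNumber::kExponentOffset on ia32),
      // low word = lower mantissa bits (kMantissaOffset).
      return (bits >> 32) == 0x80000000u && static_cast<uint32_t>(bits) == 0;
    }
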
@@ -3282,57 +3306,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm(), SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, ebx);
- __ movd(xmm0, eax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3424,32 +3397,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiIndex);
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, kIndexIsTooLarge);
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, kIndexIsNegative);
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmp(value, Immediate(encoding_mask));
- __ Check(equal, kUnexpectedStringType);
- __ pop(value);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3460,18 +3407,26 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ pop(value);
+ __ pop(index);
if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ test(value, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
}
__ SmiUntag(value);
__ SmiUntag(index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
__ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
value);
context()->Plug(string);
@@ -3488,13 +3443,19 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
+ __ SmiUntag(index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index);
}
__ SmiUntag(value);
@@ -3555,8 +3516,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3681,11 +3642,20 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ if (FLAG_new_string_add) {
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
+ __ pop(edx);
+ NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ } else {
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ CallStub(&stub);
+ }
context()->Plug(eax);
}
@@ -3703,42 +3673,6 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -4408,14 +4342,50 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(eax, &no_conversion, Label::kNear);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ if (expr->op() == Token::INC) {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ j(no_overflow, &done, Label::kNear);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
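
The restructured count operation now runs the smi fast path before calling ToNumber: add Smi::FromInt(1) to the tagged value, and if the add overflows, undo it and branch to the stub. A sketch of that arithmetic on ia32-style 31-bit smis (value << 1, low tag bit clear), where adding one means adding 2; the overflow builtin is GCC/Clang-specific:

    #include <cstdint>

    // Returns true and updates *tagged on the fast path; returns false and
    // leaves *tagged unchanged when the stub call is needed (overflow).
    bool TryIncrementSmi(int32_t* tagged) {
      int32_t result;
      if (__builtin_add_overflow(*tagged, 2, &result)) return false;
      *tagged = result;
      return true;
    }
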
@@ -4437,37 +4407,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- }
-
// Record position before stub call.
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
CallIC(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
expr->CountBinOpFeedbackId());
@@ -4897,6 +4844,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      //   call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
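
PatchAt and GetBackEdgeState work two and three bytes behind the call's 32-bit operand: 79 11 (jns +0x11) means the interrupt check is live, 66 90 (a two-byte nop) means the call has been retargeted at an OSR builtin. A byte-level sketch of that round trip over an ordinary buffer, using the constants from the hunk above:

    #include <cassert>
    #include <cstdint>

    const uint8_t kJnsInstruction = 0x79, kJnsOffset = 0x11;
    const uint8_t kNopByteOne = 0x66, kNopByteTwo = 0x90;

    // 'call_target_address' points at the call's 4-byte operand (pc - kIntSize).
    void PatchForOsr(uint8_t* call_target_address) {
      call_target_address[-3] = kNopByteOne;   // overwrite the jns opcode
      call_target_address[-2] = kNopByteTwo;   // overwrite its offset byte
    }

    bool IsPatchedForOsr(const uint8_t* call_target_address) {
      assert(call_target_address[-1] == 0xE8);  // call opcode still in place
      return call_target_address[-3] == kNopByteOne &&
             call_target_address[-2] == kNopByteTwo;
    }
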
diff --git a/chromium/v8/src/ia32/ic-ia32.cc b/chromium/v8/src/ia32/ic-ia32.cc
index 327ac57623e..2973beb3e46 100644
--- a/chromium/v8/src/ia32/ic-ia32.cc
+++ b/chromium/v8/src/ia32/ic-ia32.cc
@@ -611,7 +611,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -653,7 +653,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -678,7 +678,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(eax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -707,7 +707,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -733,6 +733,19 @@ static void KeyedStoreGenerateGenericHelper(
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ cmp(CodeGenerator::FixedArrayElementOperand(ebx, ecx),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(eax, &non_smi_value);
@@ -773,6 +786,16 @@ static void KeyedStoreGenerateGenericHelper(
// If the value is a number, store it as a double in the FastDoubleElements
// array.
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
&transition_double_elements, false);
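
The new double-element hole check only looks at the upper 32 bits of the slot, comparing them with the hole NaN's upper word before storing. A sketch of that one-word probe; the sentinel below is illustrative only, not V8's real kHoleNanUpper32:

    #include <cstdint>
    #include <cstring>

    const uint32_t kSketchHoleNanUpper32 = 0xFFF7FFFF;  // placeholder value

    bool SlotHoldsDoubleHole(const double* elements, int index) {
      uint64_t bits;
      std::memcpy(&bits, &elements[index], sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kSketchHoleNanUpper32;
    }
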
@@ -851,10 +874,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(edx, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(ecx, &slow);
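
Folding the new kIsObserved test into the existing access-check test is free because both flags live in the same map bit-field byte; one test against a combined mask sends either case to the slow path. In plain form:

    #include <cstdint>

    bool NeedsSlowPath(uint8_t map_bit_field, int access_check_bit, int observed_bit) {
      uint8_t mask = static_cast<uint8_t>((1u << access_check_bit) | (1u << observed_bit));
      return (map_bit_field & mask) != 0;  // any flag set -> take the generic slow path
    }
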
@@ -929,7 +952,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -1038,7 +1061,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1109,7 +1132,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1226,7 +1249,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
+ kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1304,7 +1327,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1373,7 +1396,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -1388,10 +1411,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1414,16 +1435,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
-
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
@@ -1528,7 +1548,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1543,10 +1563,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ push(ebx);
// Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.cc b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
index 025740d4575..df2d4c5294d 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
@@ -120,24 +120,6 @@ void LCodeGen::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
@@ -148,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
#endif
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(esp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
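
SaveCallerDoubles and RestoreCallerDoubles both walk the chunk's allocated-double-register bit vector and hand out consecutive kDoubleSize stack slots, so only registers that are actually allocated consume frame space. The slot assignment, sketched over a plain bitset:

    #include <bitset>
    #include <cstddef>

    const int kDoubleSize = 8;

    // Fills 'offset' with the esp-relative slot for each allocated register,
    // mirroring the 'count * kDoubleSize' slots above; returns the slot count.
    int AssignSpillSlots(const std::bitset<8>& allocated, int offset[8]) {
      int count = 0;
      for (std::size_t reg = 0; reg < allocated.size(); ++reg) {
        if (allocated.test(reg)) offset[reg] = kDoubleSize * count++;
      }
      return count;
    }
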
@@ -178,7 +194,7 @@ bool LCodeGen::GeneratePrologue() {
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
+ __ Set(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -206,15 +222,8 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
info()->AddNoFrameRange(0, masm_->pc_offset());
- __ push(esi); // Callee's context.
- if (info()->IsStub()) {
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- __ push(edi); // Callee's JS function.
- }
}
if (info()->IsOptimizing() &&
@@ -269,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- Comment(";;; Save clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
}
@@ -340,12 +339,41 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
+ // Move state of dynamic frame alignment into edx.
+ __ Set(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
// Save the first local, which is overwritten by the alignment state.
Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
__ push(alignment_loc);
- // Set the dynamic frame alignment state to "not aligned".
- __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
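
The OSR prologue can now insert alignment padding itself: when ebp is misaligned it pushes a word and slides every slot of the unoptimized frame (parameters, receiver, return address, frame pointer, context, alignment state, spill slots) down by one word, then drops a zap value into the vacated slot. The same move over an ordinary array, as a sketch:

    // Shift 'count' stack words one slot toward lower addresses, leaving the
    // highest copied-from slot free for a marker (kAlignmentZapValue above).
    void SlideFrameDownOneWord(unsigned* lowest_slot, int count, unsigned zap_value) {
      for (int i = 0; i < count; ++i) {
        lowest_slot[i] = lowest_slot[i + 1];
      }
      lowest_slot[count] = zap_value;
    }
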
@@ -355,44 +383,27 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
-
- RecordAndUpdatePosition(instr->position());
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+}
- instr->CompileToNative(this);
- if (!CpuFeatures::IsSupported(SSE2)) {
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- __ VerifyX87StackDepth(x87_stack_.depth());
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters()) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
}
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
}
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
}
@@ -412,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
__ jmp(&needs_frame);
@@ -438,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ RestoreCallerDoubles();
+ }
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
@@ -453,8 +468,9 @@ bool LCodeGen::GenerateDeferredCode() {
X87Stack copy(code->x87_stack());
x87_stack_ = copy;
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -532,6 +548,16 @@ void LCodeGen::X87LoadForUsage(X87Register reg) {
}
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
ASSERT(is_mutable_);
ASSERT(Contains(reg) && stack_depth_ > other_slot);
@@ -783,17 +809,36 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()));
+ } else {
+ // Retrieve parameter without eager stack-frame relative to the
+ // stack-pointer.
+ return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Retrieve parameter without eager stack-frame relative to the
+ // stack-pointer.
+ return Operand(
+ esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
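
ToOperand and HighOperand now handle the frameless case by addressing incoming arguments off esp: a negative spill index maps to -(index + 1) * kPointerSize plus the size of the saved return address. A worked version of that helper; taking kPCOnStackSize as 4 bytes on ia32 is this sketch's assumption:

    const int kPointerSize = 4;
    const int kPCOnStackSize = kPointerSize;  // the return address below the arguments

    int ArgumentsOffsetWithoutFrame(int index) {
      // Negative indices denote incoming arguments when no eager frame exists.
      return -(index + 1) * kPointerSize + kPCOnStackSize;
    }
    // Example: ArgumentsOffsetWithoutFrame(-1) == 4, the word just above the
    // return address on the stack.
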
@@ -931,8 +976,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -954,13 +997,12 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(fun, argc);
+ __ CallRuntime(fun, argc, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -1048,7 +1090,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ if (DeoptEveryNTimes()) {
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfd();
@@ -1122,26 +1164,31 @@ void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -1246,7 +1293,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -1258,17 +1305,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1336,11 +1376,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1392,36 +1427,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ and_(left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(eax));
@@ -1733,9 +1738,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
- case 16:
- __ shl(left, 4);
- break;
+ case 16:
+ __ shl(left, 4);
+ break;
default:
__ imul(left, left, constant);
break;
@@ -1967,9 +1972,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(xmm_scratch, Operand(temp));
+ __ orps(res, xmm_scratch);
}
}
}
@@ -2058,7 +2064,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
@@ -2070,32 +2076,87 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToRepresentation(LConstantOperand::cast(index),
+ Representation::Integer32());
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
- String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ __ push(string);
+ __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(value);
+ __ pop(string);
}
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
+ __ movzx_b(result, operand);
+ } else {
+ __ movzx_w(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+ Representation::Integer32());
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ mov_b(operand, static_cast<int8_t>(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ mov_w(operand, static_cast<int16_t>(value));
+ }
} else {
- __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov_w(operand, value);
+ }
}
}
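
BuildSeqStringOperand folds the string encoding into the addressing mode: a constant index becomes a fixed byte offset past the sequential-string header, while a register index gets scaled by times_1 or times_2. The underlying offset arithmetic, with a hypothetical header size standing in for SeqString::kHeaderSize:

    enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

    // Byte offset of character 'index' from the start of the string object.
    int SeqStringCharOffset(int index, Encoding encoding, int header_size) {
      int char_size = (encoding == TWO_BYTE_ENCODING) ? 2 : 1;  // kUC16Size vs kCharSize
      return header_size + index * char_size;
    }
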
@@ -2178,7 +2239,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -2195,7 +2256,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
@@ -2208,8 +2269,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
__ addsd(left, right);
@@ -2229,17 +2288,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
4);
// Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
+ // Store it into the result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
@@ -2272,6 +2331,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ PrepareCallCFunction(4, eax);
X87Mov(Operand(esp, 1 * kDoubleSize), right);
X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
@@ -2295,20 +2356,12 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
@@ -2340,25 +2393,6 @@ void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2369,8 +2403,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2390,8 +2425,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2476,8 +2512,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(not_equal, &not_heap_number, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
} else {
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2521,6 +2558,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -2556,10 +2597,15 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), SSE2);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ } else {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ }
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
@@ -2615,7 +2661,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ fld(0);
__ FCmp();
Label ok;
- __ j(parity_even, &ok);
+ __ j(parity_even, &ok, Label::kNear);
__ fstp(0);
EmitFalseBranch(instr, no_condition);
__ bind(&ok);
@@ -2626,7 +2672,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movdbl(MemOperand(esp, 0), input_reg);
+ __ movsd(MemOperand(esp, 0), input_reg);
} else {
__ fstp_d(MemOperand(esp, 0));
}
@@ -2638,6 +2684,35 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ CpuFeatureScope use_sse2(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, value);
+ EmitFalseBranch(instr, not_equal);
+ __ movmskpd(scratch, value);
+ __ test(scratch, Immediate(1));
+ EmitBranch(instr, not_zero);
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ EmitFalseBranch(instr, not_equal);
+ __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
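
The DoCompareMinusZeroAndBranch lowering added in the hunk above distinguishes -0 from +0 by first comparing against +0.0 (which compares equal for both zeros) and then inspecting the sign bit — movmskpd in the SSE2 path, or the heap number's exponent/mantissa words in the tagged path. A minimal standalone sketch of the same bit-level test (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstring>

    // True iff d is IEEE-754 negative zero: sign bit set, every other bit clear.
    // Mirrors the exponent-word == 0x80000000 / mantissa-word == 0 check above.
    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits == 0x8000000000000000ull;
    }
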
@@ -2939,7 +3014,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
// A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
+ __ JumpIfSmi(object, &false_result, Label::kNear);
  // This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2952,18 +3027,18 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not an instance of anything.
__ cmp(object, factory()->null_value());
- __ j(equal, &false_result);
+ __ j(equal, &false_result, Label::kNear);
// String values are not instances of anything.
Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result);
+ __ j(is_string, &false_result, Label::kNear);
// Go to the deferred code.
__ jmp(deferred->entry());
@@ -3016,14 +3091,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -3090,17 +3157,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- ASSERT(NeedsEagerFrame());
- CpuFeatureScope scope(masm(), SSE2);
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
@@ -3116,7 +3173,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding);
+ __ j(equal, &no_padding, Label::kNear);
EmitReturn(instr, true);
__ bind(&no_padding);
@@ -3131,7 +3188,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -3154,7 +3211,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3245,13 +3302,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- if (instr->object()->IsConstantOperand()) {
- ExternalReference external_reference = ToExternalReference(
- LConstantOperand::cast(instr->object()));
- __ mov(result, MemOperand::StaticVariable(external_reference));
- } else {
- __ mov(result, MemOperand(ToRegister(instr->object()), offset));
- }
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ __ Load(result, operand, access.representation());
return;
}
@@ -3261,7 +3316,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, FieldOperand(object, offset));
+ __ movsd(result, FieldOperand(object, offset));
} else {
X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
@@ -3269,12 +3324,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ mov(result, FieldOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset));
+ object = result;
}
+ __ Load(result, FieldOperand(object, offset), access.representation());
}
@@ -3349,6 +3403,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3405,7 +3465,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
X87Mov(ToX87Register(instr->result()), operand);
}
@@ -3476,7 +3536,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
+ __ movsd(result, double_load_operand);
} else {
X87Mov(ToX87Register(instr->result()), double_load_operand);
}
@@ -3621,6 +3681,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
// Do not transform the receiver to object for strict mode
// functions.
@@ -3628,12 +3689,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
+ __ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ __ j(not_equal, &receiver_ok, dist);
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
@@ -3693,7 +3754,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
@@ -3778,9 +3838,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
if (can_invoke_directly) {
if (edi_state == EDI_UNINITIALIZED) {
__ LoadHeapObject(edi, function);
@@ -3805,6 +3862,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
@@ -3845,7 +3903,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
__ jmp(&allocated, Label::kNear);
@@ -3903,11 +3961,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
@@ -3924,7 +3982,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3977,7 +4035,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
@@ -3992,16 +4050,18 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
+ __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
+ __ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
@@ -4010,16 +4070,16 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cmp(output_reg, 0x80000000u);
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
+ __ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movsd(input_temp, input_reg); // Do not alter input_reg.
+ __ movaps(input_temp, input_reg); // Do not alter input_reg.
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
@@ -4027,12 +4087,12 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done);
+ __ j(equal, &done, dist);
__ sub(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
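
The rewritten DoMathRound above still computes round(x) as floor(x + 0.5): for x >= 0.5 a plain truncation of x + 0.5 suffices, the range [-0.5, 0.5) is short-circuited to 0, and for x < -0.5 the truncation (which rounds toward zero) is compensated by one. A scalar model of that strategy, with the cvttsd2si overflow sentinel expressed as a deopt (an editorial sketch, not the emitted sequence):

    #include <cstdint>
    #include <stdexcept>

    // Models cvttsd2si plus the 0x80000000 overflow check: out-of-range or NaN deopts.
    int32_t TruncateOrDeopt(double v) {
      if (!(v >= -2147483648.0 && v < 2147483648.0))
        throw std::runtime_error("deopt: D2I conversion overflow");
      return static_cast<int32_t>(v);
    }

    int32_t MathRound(double x) {
      if (x >= 0.5) return TruncateOrDeopt(x + 0.5);  // floor == trunc for positive sums
      if (!(x < -0.5)) return 0;                      // [-0.5, 0.5) and NaN; -0 deopts in the real code
      double sum = x + 0.5;                           // x - (-0.5)
      int32_t t = TruncateOrDeopt(sum);               // truncation rounds negative values up...
      if (static_cast<double>(t) != sum) --t;         // ...so step down to the floor
      return t;
    }
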
@@ -4059,7 +4119,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -4125,97 +4185,33 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatureScope scope(masm(), SSE2);
-
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ mov(native_context, FieldOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- __ movzx_w(scratch3, state0);
- __ imul(scratch3, scratch3, 18273);
- __ shr(state0, 16);
- __ add(state0, scratch3);
- // Save state[0].
- __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(scratch3, state1);
- __ imul(scratch3, scratch3, 36969);
- __ shr(state1, 16);
- __ add(state1, scratch3);
- // Save state[1].
- __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = state0;
- __ shl(random, 14);
- __ and_(state1, Immediate(0x3FFFF));
- __ add(random, state1);
-
- // Convert 32 random bits in random to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
- __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(scratch4, scratch3);
- __ movd(result, random);
- __ cvtss2sd(scratch4, scratch4);
- __ xorps(result, scratch4);
- __ subsd(result, scratch4);
-}
-
-
void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ movsd(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ __ movsd(input_reg, Operand::StaticVariable(ninf));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
+ __ movsd(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
__ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
+ __ movsd(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
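
DoMathLog above now loads -Infinity from a new external constant for a zero input instead of assembling the bit pattern on the stack; negative and NaN inputs still produce the canonical NaN, and only strictly positive inputs reach the fyl2x path (ln(x) = ln 2 * log2(x)). The special-case handling, as a sketch:

    #include <cmath>
    #include <limits>

    double MathLog(double x) {
      if (x > 0.0) return std::log(x);                                // fldln2; fyl2x
      if (x == 0.0) return -std::numeric_limits<double>::infinity();  // +0 and -0
      return std::numeric_limits<double>::quiet_NaN();                // x < 0 or NaN
    }
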
@@ -4225,10 +4221,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
@@ -4273,7 +4270,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
@@ -4321,7 +4317,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ leave();
+ __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ } else {
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4388,13 +4389,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// look at the first argument
__ mov(ecx, Operand(esp, 0));
__ test(ecx, ecx);
- __ j(zero, &packed_case);
+ __ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
@@ -4409,7 +4410,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -4424,7 +4426,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ lea(result, Operand(base, instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
}
@@ -4445,7 +4453,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(operand, Immediate(ToInteger32(operand_value)));
} else {
Register value = ToRegister(instr->value());
- __ mov(operand, value);
+ __ Store(value, operand, representation);
}
return;
}
@@ -4480,7 +4488,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(object, offset), value);
+ __ movsd(FieldOperand(object, offset), value);
} else {
X87Register value = ToX87Register(instr->value());
X87Mov(FieldOperand(object, offset), value);
@@ -4518,17 +4526,24 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
+ MemOperand operand = FieldOperand(write_register, offset);
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
+ Register value = ToRegister(operand_value);
+ __ Store(value, operand, representation);
+ } else if (representation.IsInteger32()) {
+ Immediate immediate = ToImmediate(operand_value, representation);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, immediate);
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(FieldOperand(write_register, offset), handle_value);
+ __ mov(operand, handle_value);
}
} else {
- __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4609,8 +4624,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else {
__ fld(0);
__ fstp_s(operand);
@@ -4618,7 +4634,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
X87Mov(operand, ToX87Register(instr->value()));
}
@@ -4674,13 +4690,13 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Label have_value;
__ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
}
- __ movdbl(double_store_operand, value);
+ __ movsd(double_store_operand, value);
} else {
// Can't use SSE2 in the serializer
if (instr->hydrogen()->IsConstantHoleStore()) {
@@ -4710,15 +4726,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ fld(0);
__ FCmp();
- __ j(parity_odd, &no_special_nan_handling);
+ __ j(parity_odd, &no_special_nan_handling, Label::kNear);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
__ add(esp, Immediate(kDoubleSize));
Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ jmp(&no_special_nan_handling);
+ __ j(not_equal, &canonicalize, Label::kNear);
+ __ jmp(&no_special_nan_handling, Label::kNear);
__ bind(&canonicalize);
__ fstp(0);
__ fld_d(Operand::StaticVariable(canonical_nan_reference));
@@ -4803,8 +4819,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
}
@@ -4825,22 +4843,18 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
+ Immediate(to_map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(esi));
PushSafepointRegistersScope scope(this);
if (!object_reg.is(eax)) {
- __ push(object_reg);
- }
- LoadContextFromDeferred(instr->context());
- if (!object_reg.is(eax)) {
- __ pop(eax);
+ __ mov(eax, object_reg);
}
__ mov(ebx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
@@ -4964,10 +4978,19 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(ToRegister(instr->context()).is(esi));
+ if (FLAG_new_string_add) {
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ NewStringAddStub stub(instr->hydrogen()->flags(),
+ isolate()->heap()->GetPretenureMode());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4978,7 +5001,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else if (input->IsRegister()) {
Register input_reg = ToRegister(input);
__ push(input_reg);
@@ -5001,14 +5024,21 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ LOperand* temp = instr->temp();
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+ } else {
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+ }
}
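
The SSE2 branch of DoUint32ToDouble above needs LoadUint32 (and a double temp) because Cvtsi2sd interprets its 32-bit source as signed, so inputs with the top bit set must be corrected by 2^32; the x87 branch gets the same treatment via LoadUint32NoSSE2. The underlying fix-up, sketched:

    #include <cstdint>

    double Uint32ToDouble(uint32_t value) {
      double d = static_cast<double>(static_cast<int32_t>(value));  // what cvtsi2sd would produce
      if (static_cast<int32_t>(value) < 0) d += 4294967296.0;       // undo the signed interpretation
      return d;
    }
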
@@ -5084,6 +5114,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -5098,7 +5129,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
@@ -5107,7 +5138,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg,
+ __ LoadUint32(xmm_scratch, reg,
ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
@@ -5143,12 +5174,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+ // Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5192,7 +5223,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5319,7 +5350,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -5328,28 +5359,17 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
} else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (and hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
}
+
// Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
@@ -5358,6 +5378,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movsd(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -5367,7 +5400,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// input register since we avoid dependencies.
__ mov(temp_reg, input_reg);
__ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(temp_reg));
+ __ Cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
@@ -5375,25 +5408,36 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
-
if (instr->truncating()) {
- Label heap_number, slow_case;
+ Label no_heap_number, check_bools, check_false;
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ __ bind(&no_heap_number);
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
__ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Set(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
+ __ Set(input_reg, Immediate(0));
__ jmp(done);
-
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
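
The deferred truncating conversion above no longer bails out on oddballs: a heap number is truncated with TruncateHeapNumberToI, undefined and false become 0, true becomes 1, and only anything else deoptimizes. A sketch of the new dispatch, with Value as an illustrative stand-in for a tagged input (not V8 API):

    #include <cmath>
    #include <cstdint>
    #include <stdexcept>

    // ECMA-262 ToInt32: truncate, then wrap modulo 2^32 (what TruncateHeapNumberToI performs).
    int32_t TruncateDoubleToI(double d) {
      if (!std::isfinite(d)) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    struct Value {  // illustrative stand-in for a tagged value
      bool is_heap_number, is_undefined, is_true, is_false;
      double number;
    };

    int32_t TruncateTaggedToI(const Value& v) {
      if (v.is_heap_number) return TruncateDoubleToI(v.number);
      if (v.is_undefined || v.is_false) return 0;  // oddballs now truncate instead of deopting
      if (v.is_true) return 1;
      throw std::runtime_error("deopt: cannot truncate");
    }
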
@@ -5428,12 +5472,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -5498,7 +5546,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5525,7 +5574,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5605,7 +5655,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
@@ -5660,23 +5710,22 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success);
- __ j(equal, &success);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set->last();
- __ CompareMap(reg, map, &success);
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ __ CompareMap(reg, map);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
} else {
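
DoCheckMaps above now iterates a UniqueSet<Map>: every map except the last gets a cheap near-branch to the success label, and only the final comparison decides between deoptimization and, when a migration target exists, the deferred path. The control flow, reduced to a sketch (Map stands in for Handle<Map>; the set is assumed non-empty, as in the real code):

    #include <cstddef>
    #include <vector>

    using Map = const void*;

    bool CheckMaps(Map object_map, const std::vector<Map>& maps) {
      for (std::size_t i = 0; i + 1 < maps.size(); ++i) {
        if (object_map == maps[i]) return true;   // __ j(equal, &success, Label::kNear)
      }
      return object_map == maps.back();           // last compare feeds deopt / deferred migration
    }
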
@@ -5690,8 +5739,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -5707,6 +5757,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -5725,8 +5777,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -5751,13 +5803,13 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for heap number
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kFar);
+ __ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ jmp(&zero_result);
+ __ jmp(&zero_result, Label::kNear);
// Heap number
__ bind(&heap_number);
@@ -5772,15 +5824,15 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Test for negative values --> clamp to zero
__ test(scratch, scratch);
- __ j(negative, &zero_result);
+ __ j(negative, &zero_result, Label::kNear);
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
__ shr(scratch2, HeapNumber::kExponentShift);
- __ j(zero, &zero_result);
+ __ j(zero, &zero_result, Label::kNear);
__ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
- __ j(negative, &zero_result);
+ __ j(negative, &zero_result, Label::kNear);
const uint32_t non_int8_exponent = 7;
__ cmp(scratch2, Immediate(non_int8_exponent + 1));
@@ -5811,18 +5863,18 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
__ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
__ cmp(scratch2, Immediate(1 << one_half_bit_shift));
Label no_round;
- __ j(less, &no_round);
+ __ j(less, &no_round, Label::kNear);
Label round_up;
__ mov(scratch2, Immediate(1 << one_half_bit_shift));
- __ j(greater, &round_up);
+ __ j(greater, &round_up, Label::kNear);
__ test(scratch3, scratch3);
- __ j(not_zero, &round_up);
+ __ j(not_zero, &round_up, Label::kNear);
__ mov(scratch2, scratch);
__ and_(scratch2, Immediate(1 << one_bit_shift));
__ shr(scratch2, 1);
__ bind(&round_up);
__ add(scratch, scratch2);
- __ j(overflow, &largest_value);
+ __ j(overflow, &largest_value, Label::kNear);
__ bind(&no_round);
__ shr(scratch, 23);
__ mov(result_reg, scratch);
@@ -5837,7 +5889,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// bit is set.
__ and_(scratch, HeapNumber::kMantissaMask);
__ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ j(not_zero, &zero_result); // M!=0 --> NaN
+ __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
// Infinity -> Fall through to map to 255.
__ bind(&largest_value);
@@ -5846,7 +5898,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
__ bind(&zero_result);
__ xor_(result_reg, result_reg);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// smi
__ bind(&is_smi);
@@ -5896,7 +5948,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
@@ -5929,7 +5985,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Immediate(Smi::FromInt(0)));
+ __ Set(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5942,19 +5998,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(Immediate(Smi::FromInt(size)));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ push(Immediate(Smi::FromInt(flags)));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5994,7 +6053,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
__ push(ebx);
@@ -6039,6 +6098,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
@@ -6047,43 +6107,48 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- input, instr->type_literal());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
if (final_branch_condition != no_condition) {
EmitBranch(instr, final_branch_condition);
}
}
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
Condition final_branch_condition = no_condition;
if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
+ __ JumpIfSmi(input, true_label, true_distance);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
} else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
+ __ j(above_equal, false_label, false_distance);
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else if (type_name->Equals(heap()->symbol_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
@@ -6093,8 +6158,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -6103,29 +6168,29 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- __ j(below, false_label);
+ __ j(below, false_label, false_distance);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
+ __ j(above, false_label, false_distance);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else {
- __ jmp(false_label);
+ __ jmp(false_label, false_distance);
}
return final_branch_condition;
}
@@ -6157,14 +6222,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
}
}
@@ -6173,7 +6237,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6195,6 +6259,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}
@@ -6244,7 +6313,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -6257,7 +6326,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6284,6 +6353,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
@@ -6321,9 +6391,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Label load_cache, done;
__ EnumLength(result, map);
__ cmp(result, Immediate(Smi::FromInt(0)));
- __ j(not_equal, &load_cache);
+ __ j(not_equal, &load_cache, Label::kNear);
__ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
@@ -6351,7 +6421,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Label out_of_object, done;
__ cmp(index, Immediate(0));
- __ j(less, &out_of_object);
+ __ j(less, &out_of_object, Label::kNear);
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.h b/chromium/v8/src/ia32/lithium-codegen-ia32.h
index 769917f7e24..638f80c3549 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.h
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.h
@@ -33,6 +33,7 @@
#include "checks.h"
#include "deoptimizer.h"
#include "ia32/lithium-gap-resolver-ia32.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -45,45 +46,28 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -129,12 +113,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
void X87Fxch(X87Register reg, int other_slot = 0) {
x87_stack_.Fxch(reg, other_slot);
}
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
bool X87StackEmpty() {
return x87_stack_.depth() == 0;
@@ -188,27 +177,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock() const;
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -220,14 +195,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
@@ -251,7 +229,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
@@ -292,6 +271,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
@@ -319,6 +302,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -331,12 +318,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
@@ -362,10 +350,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -395,7 +380,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -425,26 +410,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void MakeSureStackPagesMapped(int offset);
#endif
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
class X87Stack {
@@ -505,8 +480,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
index b5bc18bdc96..d621bd261d6 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -326,7 +326,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- __ movdbl(dst, Operand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
@@ -360,7 +360,7 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
}
} else {
// load from the register onto the stack, store in destination, which must
@@ -378,12 +378,12 @@ void LGapResolver::EmitMove(int index) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
} else {
// load from the stack slot on top of the floating point stack, and then
@@ -486,9 +486,9 @@ void LGapResolver::EmitSwap(int index) {
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
@@ -499,12 +499,12 @@ void LGapResolver::EmitSwap(int index) {
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
+ __ movsd(src0, xmm0);
} else {
// No other combinations are possible.
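
The gap-resolver hunks above rename the assembler's movdbl alias to movsd, the actual SSE2 mnemonic for a 64-bit double move, and switch the xmm-to-xmm copy in the swap path to movaps. A plausible reason for the latter: movsd between two xmm registers only replaces the low 64 bits and keeps the destination's upper half, creating a dependency on the register's previous contents, while movaps copies the full register. A minimal standalone sketch of that merge behaviour, using SSE2 intrinsics rather than V8's assembler:

    #include <emmintrin.h>
    #include <cstdio>

    int main() {
      __m128d dst = _mm_set_pd(1.5, 2.5);   // hi = 1.5, lo = 2.5 (stale destination)
      __m128d src = _mm_set_pd(3.5, 4.5);   // hi = 3.5, lo = 4.5

      // movsd xmm,xmm semantics: low lane taken from src, high lane kept from dst.
      __m128d movsd_like = _mm_move_sd(dst, src);
      // movaps semantics: the whole register is copied.
      __m128d movaps_like = src;

      double a[2], b[2];
      _mm_storeu_pd(a, movsd_like);
      _mm_storeu_pd(b, movaps_like);
      std::printf("movsd-like:  lo=%.1f hi=%.1f\n", a[0], a[1]);   // 4.5 1.5
      std::printf("movaps-like: lo=%.1f hi=%.1f\n", b[0], b[1]);   // 4.5 3.5
      return 0;
    }
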
diff --git a/chromium/v8/src/ia32/lithium-ia32.cc b/chromium/v8/src/ia32/lithium-ia32.cc
index a5acb9fa9e4..aa35e9d6b40 100644
--- a/chromium/v8/src/ia32/lithium-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-ia32.cc
@@ -302,7 +302,8 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
@@ -386,9 +387,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot if needed for a double-width slot.
- if (is_double) {
+ if (kind == DOUBLE_REGISTERS) {
spill_slot_count_++;
spill_slot_count_ |= 1;
num_double_slots_++;
@@ -397,11 +398,12 @@ int LPlatformChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -479,7 +481,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
ASSERT_EQ(alignment_state_index, 0);
USE(alignment_state_index);
}
@@ -488,7 +490,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -560,29 +562,42 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) {
}
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value);
}
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseAtStart(value);
}
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+ Register fixed_register) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseFixed(value, fixed_register);
+}
+
+
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegister(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegisterAtStart(value);
}
@@ -707,7 +722,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -762,52 +777,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseFixed(right_value, ecx);
}
- } else {
- right = UseFixed(right_value, ecx);
- }
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
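
In the reworked DoShift above, constant shift amounts are still masked with 0x1f before deciding whether the operation can deoptimize, mirroring how both the x86 shift instructions and JavaScript's shift operators use only the low five bits of the count. A tiny standalone example of that masking rule (plain C++, not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t value = 1;
      int shift = 37;                  // JS: 1 << 37 === 32, because 37 & 31 == 5
      int masked = shift & 0x1f;       // same mask the builder applies to constants
      std::printf("1 << (37 & 0x1f) = %d\n", value << masked);  // prints 32
      return 0;
    }
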
@@ -816,21 +823,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -914,10 +922,33 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -947,7 +978,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -964,7 +994,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
clobber->set_hydrogen_value(current);
chunk_->AddInstruction(clobber, current_block_);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1061,21 +1090,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor);
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
ToBooleanStub::Types expected = instr->expected_input_types();
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
// environment is needed since all cases are handled.
+ HValue* value = instr->value();
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
@@ -1141,12 +1164,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1171,7 +1188,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseAny(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1186,11 +1202,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1238,7 +1254,6 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1246,7 +1261,6 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1279,10 +1293,9 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
LOperand* input = UseRegister(instr->value());
LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(context, input, temp);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1344,10 +1357,9 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineSameAsFirst(result);
}
@@ -1356,7 +1368,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1364,7 +1375,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1372,14 +1382,12 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1387,7 +1395,6 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1396,7 +1403,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1405,14 +1411,14 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LInstruction* result = DefineFixed(call, eax);
+ if (instr->IsTailCall()) return result;
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
@@ -1442,29 +1448,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1481,8 +1477,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1562,10 +1559,6 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
@@ -1584,17 +1577,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1618,7 +1604,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1639,7 +1624,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1670,8 +1654,22 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1713,19 +1711,6 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, xmm1);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsSmiOrTagged());
ASSERT(instr->right()->representation().IsSmiOrTagged());
@@ -1752,9 +1737,12 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
ASSERT(instr->right()->representation().IsDouble());
LOperand* left;
LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
} else {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
@@ -1766,6 +1754,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1774,8 +1764,18 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
}
@@ -1884,14 +1884,43 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
+ if (instr->encoding() == String::ONE_BYTE_ENCODING) {
+ if (FLAG_debug_code) {
+ return UseFixed(instr->value(), eax);
+ } else {
+ return UseFixedOrConstant(instr->value(), eax);
+ }
+ } else {
+ if (FLAG_debug_code) {
+ return UseRegisterAtStart(instr->value());
+ } else {
+ return UseRegisterOrConstantAtStart(instr->value());
+ }
+ }
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(ecx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), ecx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = GetSeqStringSetCharOperand(instr);
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -1909,6 +1938,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
@@ -1944,7 +1980,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = TempRegister();
@@ -2051,12 +2086,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2121,12 +2150,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), esi)
- : NULL;
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), eax), context,
- parameter_count);
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), eax), context, parameter_count);
}
@@ -2235,6 +2262,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2389,7 +2421,7 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new_map_reg, temp_reg);
return result;
} else {
- LOperand* context = UseRegister(instr->context());
+ LOperand* context = UseFixed(instr->context(), esi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
return AssignPointerMap(result);
@@ -2436,7 +2468,12 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
- if (needs_write_barrier) {
+ if (instr->field_representation().IsInteger8() ||
+ instr->field_representation().IsUInteger8()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
@@ -2482,8 +2519,12 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
+ LOperand* left = FLAG_new_string_add
+ ? UseFixed(instr->left(), edx)
+ : UseOrConstantAtStart(instr->left());
+ LOperand* right = FLAG_new_string_add
+ ? UseFixed(instr->right(), eax)
+ : UseOrConstantAtStart(instr->right());
LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
return MarkAsCall(DefineFixed(string_add, eax), instr);
}
@@ -2552,7 +2593,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2583,7 +2624,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2638,6 +2678,8 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2712,7 +2754,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
diff --git a/chromium/v8/src/ia32/lithium-ia32.h b/chromium/v8/src/ia32/lithium-ia32.h
index aa5c0bbeed7..ea4fef8a710 100644
--- a/chromium/v8/src/ia32/lithium-ia32.h
+++ b/chromium/v8/src/ia32/lithium-ia32.h
@@ -74,6 +74,7 @@ class LCodeGen;
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
V(ClobberDoubles) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -93,6 +94,7 @@ class LCodeGen;
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
+ V(Dummy) \
V(DummyUse) \
V(ElementsKind) \
V(ForInCacheArray) \
@@ -107,7 +109,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -116,7 +117,6 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
@@ -130,6 +130,7 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
@@ -153,10 +154,10 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(Power) \
- V(Random) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -216,7 +217,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -257,15 +257,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -311,7 +302,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -443,6 +433,13 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
@@ -753,15 +750,13 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
@@ -864,15 +859,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
@@ -907,9 +900,9 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -917,22 +910,25 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- explicit LIsNumberAndBranch(LOperand* value) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
@@ -994,6 +990,7 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
inputs_[2] = right;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
@@ -1098,6 +1095,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1124,6 +1122,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
@@ -1145,19 +1144,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1310,7 +1296,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1375,27 +1361,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
};
@@ -1463,28 +1461,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1540,7 +1516,8 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* context,
+ explicit LReturn(LOperand* value,
+ LOperand* context,
LOperand* parameter_count) {
inputs_[0] = value;
inputs_[1] = context;
@@ -1606,6 +1583,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1635,11 +1621,6 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1820,19 +1801,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -2062,8 +2043,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
@@ -2203,7 +2189,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2378,8 +2364,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2529,12 +2517,13 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
+ LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2756,8 +2745,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
: LChunk(info, graph),
num_double_slots_(0) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
int num_double_slots() const { return num_double_slots_; }
@@ -2779,13 +2768,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2857,6 +2847,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+ // An input operand in a fixed register or a constant operand.
+ MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+ Register fixed_register);
+
// An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
@@ -2902,6 +2896,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+ LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
// Marks a call for the register allocator. Assigns a pointer map to
// support GC and lazy deoptimization. Assigns an environment to support
// eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
@@ -2921,7 +2917,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
@@ -2935,7 +2931,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index b65d328435e..52d42f6ca87 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -45,7 +46,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
// TODO(titzer): should we just use a null handle here instead?
@@ -55,6 +55,34 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
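
The new Load and Store helpers above choose the move instruction from the value's representation: movsx_b/movzx_b for Integer8/UInteger8, movsx_w/movzx_w for the 16-bit cases, and a plain mov otherwise. What that buys is the correct sign- or zero-extension when a narrow field is widened into a 32-bit register; a small standalone sketch with hypothetical field contents (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t raw = 0xF0;  // raw byte stored in a narrow field

      int32_t integer8  = static_cast<int8_t>(raw);  // movsx_b-style widening: -16
      int32_t uinteger8 = raw;                       // movzx_b-style widening: 240

      std::printf("Integer8 -> %d, UInteger8 -> %d\n", integer8, uinteger8);
      return 0;
    }
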
@@ -232,7 +260,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
j(not_equal, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), input_reg);
+ movsd(MemOperand(esp, 0), input_reg);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
bind(&done);
@@ -253,8 +281,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fist_s(MemOperand(esp, 0));
fld(0);
+ fist_s(MemOperand(esp, 0));
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -283,7 +311,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label::Distance dst) {
ASSERT(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
- cvtsi2sd(scratch, Operand(result_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -344,7 +372,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
cmp(result_reg, 0x80000000u);
j(not_equal, &done, Label::kNear);
@@ -361,7 +389,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
if (input_reg.is(result_reg)) {
// Input is clobbered. Restore number from double scratch.
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), xmm0);
+ movsd(MemOperand(esp, 0), xmm0);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
} else {
@@ -390,9 +418,9 @@ void MacroAssembler::TaggedToI(Register result_reg,
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cvtsi2sd(temp, Operand(result_reg));
+ Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, Label::kNear);
@@ -445,25 +473,36 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
Label done;
cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
}
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
+
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
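
LoadUint32 above (and the new x87 variant LoadUint32NoSSE2) converts the value as a signed 32-bit integer, which is all cvtsi2sd and fild_s can do, and then adds a correction when the sign bit was set. The removed static shows the correction value: static_cast<double>(0xFFFFFFFF) + 1, i.e. 2^32; the patch merely moves it behind ExternalReference::address_of_uint32_bias(). A minimal sketch of the same arithmetic in plain C++ (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t value = 0xFFFFFFF5u;                      // 4294967285
      int32_t as_signed = static_cast<int32_t>(value);   // what cvtsi2sd sees: -11
      double d = static_cast<double>(as_signed);
      if (as_signed < 0) {
        d += 4294967296.0;  // the uint32 bias: 2^32
      }
      std::printf("%.1f\n", d);  // 4294967285.0
      return 0;
    }
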
@@ -574,6 +613,10 @@ void MacroAssembler::RecordWriteForMap(
return;
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// A single check of the map's pages interesting flag suffices, since it is
// only set during incremental collection, and then it's also guaranteed that
// the from object's page's interesting flag is also set. This optimization
@@ -630,6 +673,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
@@ -676,6 +723,12 @@ void MacroAssembler::DebugBreak() {
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
+
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
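
Cvtsi2sd above zeroes the destination with xorps before issuing the raw cvtsi2sd. The conversion instruction writes only the low 64 bits of the xmm register, so without the clear the result would merge with, and depend on, whatever the register last held. A standalone SSE2-intrinsics sketch of that difference (not V8 code):

    #include <emmintrin.h>
    #include <cstdio>

    int main() {
      __m128d stale = _mm_set_pd(7.0, 8.0);  // pretend leftover register contents

      // Bare cvtsi2sd semantics: low lane converted, high lane kept from 'stale'.
      __m128d merged = _mm_cvtsi32_sd(stale, 42);
      // xorps + cvtsi2sd semantics: start from zero, only the converted value remains.
      __m128d cleared = _mm_cvtsi32_sd(_mm_setzero_pd(), 42);

      double m[2], c[2];
      _mm_storeu_pd(m, merged);
      _mm_storeu_pd(c, cleared);
      std::printf("merged:  lo=%.1f hi=%.1f\n", m[0], m[1]);   // 42.0 7.0
      std::printf("cleared: lo=%.1f hi=%.1f\n", c[0], c[1]);   // 42.0 0.0
      return 0;
    }
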
@@ -799,9 +852,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -821,7 +874,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
@@ -834,8 +887,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -849,9 +902,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -864,10 +915,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -996,6 +1045,30 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ PredictableCodeSizeScope predictable_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+ }
+}
+
+
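Both arms of the branch above must assemble to the same number of bytes for code-age patching to work, which is what the PredictableCodeSizeScope enforces. As a cross-check using standard ia32 encodings: push ebp is 1 byte, mov ebp, esp is 2 bytes, and push esi / push edi are 1 byte each, so the normal prologue is 1 + 2 + 1 + 1 = 5 bytes; the pre-aged form is a single call rel32 (opcode 0xE8 plus a 4-byte displacement), also 5 bytes, so Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength) pads zero bytes and exists only to keep the size relation explicit.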
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
@@ -1033,10 +1106,8 @@ void MacroAssembler::EnterExitFramePrologue() {
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
@@ -1051,7 +1122,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -1095,7 +1166,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
@@ -1109,14 +1180,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -1128,11 +1201,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -1344,8 +1417,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stub-hydrogen.cc
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
@@ -1421,8 +1495,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
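The comment above is the entire probing scheme; a host-side sketch of the same index computation (illustrative only, not part of the patch):

  #include <cstdint>

  // Probe i of the unrolled loop inspects entry (hash + i + i*i) & mask; for
  // i = 0..3 that is offsets 0, 2, 6 and 12 from the initial slot, so the
  // unrolled probes spread quickly away from the starting position.
  uint32_t NumberDictionaryProbe(uint32_t hash, uint32_t i, uint32_t mask) {
    return (hash + i + i * i) & mask;
  }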
@@ -1440,7 +1513,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
@@ -1942,30 +2015,48 @@ void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
+ Label short_loop, len4, len8, len12, done, short_string;
ASSERT(source.is(esi));
ASSERT(destination.is(edi));
ASSERT(length.is(ecx));
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
- jmp(&done);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
@@ -2096,8 +2187,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2109,8 +2198,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2141,23 +2229,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -2172,7 +2246,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -2221,11 +2296,13 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Operand thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2281,9 +2358,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Label prologue;
// Load the value from ReturnValue
- mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+ mov(eax, return_value_operand);
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2303,6 +2381,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -2339,11 +2418,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -2543,7 +2630,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -2552,18 +2639,25 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
+ ASSERT(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, actual, flag, call_wrapper, call_kind);
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2980,6 +3074,40 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(eax);
+ push(Immediate(Smi::FromInt(reason)));
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3003,6 +3131,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
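To make the hashing convention above concrete — smis hash to their untagged value, heap numbers to the xor of the two 32-bit halves of their IEEE-754 bits, and the index is hash & mask, where mask is half the cache length minus one — here is a minimal host-side sketch (helper name invented, not part of the patch):

  #include <cstdint>
  #include <cstring>

  // Same hash the generated code computes for a heap number: xor the upper and
  // lower 32-bit words of the double, then mask into the cache.
  uint32_t NumberStringCacheIndexForDouble(double value, uint32_t mask) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return (static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32)) & mask;
  }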
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
@@ -3063,6 +3273,42 @@ void MacroAssembler::JumpIfNotUniqueName(Operand operand,
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Throw(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ ThrowIf(not_equal, kUnexpectedStringType);
+
+ // The index is assumed to be untagged coming in; tag it to compare with the
+ // string length without using a temp register. It is restored at the end of
+ // this function.
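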
+ SmiTag(index);
+ // Can't use overflow here directly; the compiler can't seem to disambiguate it.
+ ThrowIf(NegateCondition(no_overflow), kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ ThrowIf(greater_equal, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ ThrowIf(less, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -3379,7 +3625,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
j(equal, call_runtime);
jmp(&start);
@@ -3408,9 +3654,8 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
-
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3419,15 +3664,40 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Immediate(isolate()->factory()->allocation_memento_map()));
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Use scratch0 to track the current object while walking the prototype chain.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Map::kElementsKindMask);
+ shr(scratch1, Map::kElementsKindShift);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
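The allocation-memento test rewritten above reduces to an address-range check followed by one map compare. A rough host-side sketch of the range part, with all names invented for illustration (the real code works on a tagged receiver and also compares against the allocation memento map):

  #include <cstdint>

  // The generated code computes receiver + JSArray::kSize + AllocationMemento::kSize
  // (minus the heap-object tag) and jumps to no_memento_found when that address lies
  // outside [new_space_start, new_space_allocation_top].
  bool MementoCandidateInNewSpace(uintptr_t receiver, uintptr_t array_size,
                                  uintptr_t memento_size,
                                  uintptr_t new_space_start, uintptr_t new_space_top) {
    uintptr_t candidate = receiver + array_size + memento_size;
    return candidate >= new_space_start && candidate <= new_space_top;
  }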
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.h b/chromium/v8/src/ia32/macro-assembler-ia32.h
index e4e4533bf5f..054b164846d 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.h
@@ -61,6 +61,9 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(Register src, const Operand& dst, Representation r);
+
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
@@ -225,6 +228,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -240,7 +246,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -343,6 +349,13 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -366,6 +379,12 @@ class MacroAssembler: public Assembler {
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
+ // hinders register renaming and makes dependence chains longer. So we use
+ // xorps to clear the dst register before cvtsi2sd to solve this issue.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeSet(Register dst, const Immediate& x);
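The dependence-breaking idiom described in the Cvtsi2sd comment above has a direct analogue in portable SSE2 intrinsics; the snippet below only illustrates the idea and is not code from this patch:

  #include <emmintrin.h>

  // _mm_setzero_pd() typically lowers to an xorps/pxor of the register with itself,
  // which marks the whole destination as dead before cvtsi2sd merges the converted
  // value into its low 64 bits, so no false dependence on the old contents remains.
  double ConvertIntWithCleanDestination(int value) {
    __m128d zero = _mm_setzero_pd();
    __m128d converted = _mm_cvtsi32_sd(zero, value);  // cvtsi2sd into the low lane
    return _mm_cvtsd_f64(converted);
  }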
@@ -408,13 +427,8 @@ class MacroAssembler: public Assembler {
bool specialize_for_processor,
int offset = 0);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success);
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -509,6 +523,7 @@ class MacroAssembler: public Assembler {
}
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32NoSSE2(Register src);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value,
@@ -575,6 +590,12 @@ class MacroAssembler: public Assembler {
// Throw past all JS frames to the top JS entry frame.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -754,11 +775,20 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -807,7 +837,8 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- int return_value_offset_from_ebp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -881,8 +912,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -890,6 +919,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
@@ -914,6 +954,11 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -931,13 +976,27 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal
+ // If allocation info is present, conditional code is set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
private:
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -957,7 +1016,7 @@ class MacroAssembler: public Assembler {
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/chromium/v8/src/ia32/simulator-ia32.cc b/chromium/v8/src/ia32/simulator-ia32.cc
index ab8169375c0..b6f2847332e 100644
--- a/chromium/v8/src/ia32/simulator-ia32.cc
+++ b/chromium/v8/src/ia32/simulator-ia32.cc
@@ -27,4 +27,3 @@
// Since there is no simulator for the ia32 architecture this file is empty.
-
diff --git a/chromium/v8/src/ia32/stub-cache-ia32.cc b/chromium/v8/src/ia32/stub-cache-ia32.cc
index bebd7bebc9a..9efedc67325 100644
--- a/chromium/v8/src/ia32/stub-cache-ia32.cc
+++ b/chromium/v8/src/ia32/stub-cache-ia32.cc
@@ -325,32 +325,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -409,11 +405,11 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate()),
+ ExternalReference(IC_Utility(id), masm->isolate()),
StubCache::kInterceptorArgsLength);
}
@@ -455,53 +451,151 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
}
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context);
+
+
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : ReturnValue default value
- // -- esp[24] : ReturnValue
- // -- esp[28] : last argument
- // -- ...
- // -- esp[(argc + 6) * 4] : first argument
- // -- esp[(argc + 7) * 4] : receiver
- // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
+ // Construct the FunctionCallbackInfo.
+ __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
+ Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
Immediate(reinterpret_cast<int>(masm->isolate())));
- __ mov(Operand(esp, 5 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
masm->isolate()->factory()->undefined_value());
- __ mov(Operand(esp, 6 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
- __ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(eax, Operand(esp, 1 * kPointerSize));
+
+ GenerateFastApiCallBody(masm, optimization, argc, false);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that should be removed
+// when api call ICs are generated in hydrogen.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+
+ // Copy return value.
+ __ pop(scratch1);
+
+ // receiver
+ __ push(receiver);
+
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch1.is(arg));
+ ASSERT(!scratch2.is(arg));
+ ASSERT(!scratch3.is(arg));
+ __ push(arg);
+ }
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(esi);
+
+ // Get the function and setup the context.
+ Handle<JSFunction> function = optimization.constant_function();
+ __ LoadHeapObject(scratch2, function);
+ __ mov(esi, FieldOperand(scratch2, JSFunction::kContextOffset));
+ // callee
+ __ push(scratch2);
+
+ Isolate* isolate = masm->isolate();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data(), isolate);
+ // Push data from ExecutableAccessorInfo.
+ if (isolate->heap()->InNewSpace(*call_data)) {
+ __ mov(scratch2, api_call_info);
+ __ mov(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
+ __ push(scratch3);
+ } else {
+ __ push(Immediate(call_data));
+ }
+ // return value
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate)));
+ // holder
+ __ push(receiver);
+
+ // store receiver address for GenerateFastApiCallBody
+ ASSERT(!scratch1.is(eax));
+ __ mov(eax, esp);
+ // return address
+ __ push(scratch1);
+
+ GenerateFastApiCallBody(masm, optimization, argc, true);
+}
+
+
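The push sequence in the helper above builds the FunctionCallbackInfo implicit-args block from the top down: because the stack grows toward lower addresses, the value pushed last (the holder) lands at the lowest address, which is why the STATIC_ASSERTed indices run 0..6 in the reverse order of the pushes and why eax, captured from esp, can serve as the implicit_args base. A hypothetical struct view of that block, field names invented purely for illustration:

  struct ImplicitArgsView {          // mirrors the FCA indices asserted above
    void* holder;                    // kHolderIndex == 0, pushed last
    void* isolate;                   // kIsolateIndex == 1
    void* return_value_default;      // kReturnValueDefaultValueIndex == 2
    void* return_value;              // kReturnValueOffset == 3
    void* data;                      // kDataIndex == 4
    void* callee;                    // kCalleeIndex == 5
    void* context_save;              // kContextSaveIndex == 6, pushed first
  };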
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- esp[32] : last argument
+ // -- ...
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
+ //
+ // -- eax : receiver address
+ // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
// API function gets reference to the v8::Arguments. If CPU profiler
// is enabled wrapper function will be called and we need to pass
@@ -513,18 +607,20 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
- // v8::Arguments::implicit_args_.
+ // FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), eax);
- __ add(eax, Immediate(argc * kPointerSize));
- // v8::Arguments::values_.
+ __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
__ mov(ApiParameterOperand(3), eax);
- // v8::Arguments::length_.
+ // FunctionCallbackInfo::length_.
__ Set(ApiParameterOperand(4), Immediate(argc));
- // v8::Arguments::is_construct_call_.
+ // FunctionCallbackInfo::is_construct_call_.
__ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
@@ -533,57 +629,29 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ Operand return_value_operand(ebp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- kFastApiCallArguments + 1);
-}
-
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Copy return value.
- __ mov(scratch, Operand(esp, 0));
- // Assign stack space for the call arguments.
- __ sub(esp, Immediate(stack_space * kPointerSize));
- // Move the return address on top of the stack.
- __ mov(Operand(esp, 0), scratch);
- // Write holder to stack frame.
- __ mov(Operand(esp, 1 * kPointerSize), receiver);
- // Write receiver to stack frame.
- int index = stack_space;
- __ mov(Operand(esp, index-- * kPointerSize), receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ mov(Operand(esp, index-- * kPointerSize), values[i]);
- }
-
- GenerateFastApiCall(masm, optimization, argc);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
+ CallInterceptorCompiler(CallStubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
- Code::ExtraICState extra_state)
+ ExtraICState extra_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name),
- extra_state_(extra_state) {}
+ name_(name) {}
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
@@ -654,9 +722,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -670,10 +739,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
+ handle(lookup->holder()), scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -686,13 +755,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ Handle<JSFunction> fun = optimization.constant_function();
+ stub_compiler_->GenerateJumpFunction(object, fun);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -719,20 +783,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, interceptor_holder,
+ IC::kLoadPropertyWithInterceptorForCall);
// Restore the name_ register.
__ pop(name_);
@@ -747,17 +808,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ __ push(receiver);
+ __ push(holder);
+ __ push(name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, holder_obj,
+ IC::kLoadPropertyWithInterceptorOnly);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
+ __ pop(name_);
+ __ pop(holder);
+ __ pop(receiver);
// Leave the internal frame.
}
@@ -765,16 +826,15 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ j(not_equal, interceptor_succeeded);
}
- StubCompiler* stub_compiler_;
+ CallStubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
- Code::ExtraICState extra_state_;
};
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Immediate(name));
@@ -805,7 +865,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -823,19 +883,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -858,7 +918,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -872,7 +932,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
@@ -880,7 +940,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
@@ -994,15 +1054,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1037,7 +1097,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -1050,14 +1110,14 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
}
@@ -1115,26 +1175,6 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1144,7 +1184,7 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -1154,11 +1194,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Handle<Map>(object->map()));
+ __ mov(scratch1, receiver_map);
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1166,31 +1206,38 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Keep track of the current object in register reg.
Register reg = object_reg;
- Handle<JSObject> current = object;
int depth = 0;
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
- while (!current.is_identical_to(holder)) {
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1201,16 +1248,19 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
if (in_new_space) {
@@ -1231,70 +1281,65 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
- ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1340,15 +1385,15 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1364,34 +1409,33 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), name(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
__ push(receiver()); // receiver
- __ mov(scratch2(), esp);
- ASSERT(!scratch2().is(reg));
// Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- Register scratch = reg.is(scratch1()) ? receiver() : scratch1();
- __ mov(scratch, Immediate(callback));
- __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset));
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
@@ -1401,9 +1445,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(reg); // holder
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
- __ push(scratch2());
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
__ push(name()); // name
__ mov(ebx, esp); // esp points to reference to name (handler).
@@ -1434,20 +1478,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_address,
ApiParameterOperand(2),
kStackSpace,
- 7);
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
__ ret(0);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1498,11 +1543,9 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -1554,22 +1597,12 @@ void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
+void CallStubCompiler::GenerateFunctionCheck(Register function,
+ Register scratch,
+ Label* miss) {
+ __ JumpIfSmi(function, miss);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+ __ j(not_equal, miss);
}
@@ -1592,9 +1625,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ JumpIfSmi(edi, miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss);
+ GenerateFunctionCheck(edi, ebx, miss);
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
@@ -1610,7 +1641,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_state_);
+ extra_state());
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1619,57 +1650,20 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
- name, &miss);
+ Register reg = HandlerFrontendHeader(
+ object, holder, name, RECEIVER_MAP_CHECK, &miss);
GenerateFastPropertyLoad(
masm(), edi, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
+ GenerateJumpFunction(object, edi, &miss);
- // Check that the function really is a function.
- __ JumpIfSmi(edi, &miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::FIELD, name);
+ return GetCode(Code::FAST, name);
}
@@ -1682,28 +1676,16 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
Code::StubType type) {
Label miss;
- // Check that function is still array
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ site->SetElementsKind(GetInitialFastElementsKind());
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ const int argc = arguments().immediate();
__ mov(eax, Immediate(argc));
__ mov(ebx, site_feedback_cell);
__ mov(edi, function);
@@ -1711,8 +1693,7 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
ArrayConstructorStub stub(isolate());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1726,33 +1707,20 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
- GenerateNameCheck(name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
if (argc == 0) {
// Noop, return the length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1970,8 +1938,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
1);
}
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1985,31 +1952,18 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -2041,6 +1995,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
ecx, times_half_pointer_size,
FixedArray::kHeaderSize),
Immediate(factory()->the_hole_value()));
+ const int argc = arguments().immediate();
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
@@ -2053,8 +2008,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
argc + 1,
1);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2068,14 +2022,6 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) {
return Handle<Code>::null();
@@ -2089,22 +2035,12 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = ebx;
Register index = edi;
@@ -2138,8 +2074,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2153,14 +2088,6 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) {
return Handle<Code>::null();
@@ -2174,22 +2101,12 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = eax;
Register index = edi;
@@ -2225,8 +2142,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2240,14 +2156,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
@@ -2257,18 +2165,10 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
}
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2291,19 +2191,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2317,14 +2210,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
if (!CpuFeatures::IsSupported(SSE2)) {
return Handle<Code>::null();
}
@@ -2340,20 +2225,10 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2368,7 +2243,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
@@ -2418,7 +2293,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
@@ -2426,16 +2301,12 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ ret(2 * kPointerSize);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2449,14 +2320,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
@@ -2466,20 +2329,10 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2535,16 +2388,12 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
__ ret(2 * kPointerSize);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2588,8 +2437,8 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
__ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, depth, &miss);
+ CheckPrototypes(IC::CurrentTypeOf(object, isolate()), edx, holder,
+ ebx, eax, edi, name, depth, &miss);
// Move the return address on top of the stack.
__ mov(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
@@ -2602,36 +2451,50 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss_before_stack_reserved);
// Return the generated code.
return GetCode(function);
}
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ Label success;
+ // Check that the object is a boolean.
+ __ cmp(object, factory()->true_value());
+ __ j(equal, &success);
+ __ cmp(object, factory()->false_value());
+ __ j(not_equal, miss);
+ __ bind(&success);
+}
+
+
+void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
+ if (object->IsGlobalObject()) {
+ const int argc = arguments().immediate();
+ const int receiver_offset = (argc + 1) * kPointerSize;
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, receiver_offset), edx);
+ }
+}
+
+
+Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* miss) {
+ GenerateNameCheck(name, miss);
+
+ Register reg = edx;
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ const int receiver_offset = (argc + 1) * kPointerSize;
+ __ mov(reg, Operand(esp, receiver_offset));
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(edx, &miss);
+ __ JumpIfSmi(reg, miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2642,129 +2505,79 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
__ IncrementCounter(isolate()->counters()->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
- edi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
+ reg = CheckPrototypes(IC::CurrentTypeOf(object, isolate()), reg, holder,
+ ebx, eax, edi, name, miss);
+
break;
- case STRING_CHECK:
+ case STRING_CHECK: {
// Check that the object is a string.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
+ __ CmpObjectType(reg, FIRST_NONSTRING_TYPE, eax);
+ __ j(above_equal, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::STRING_FUNCTION_INDEX, eax, miss);
break;
-
- case SYMBOL_CHECK:
+ }
+ case SYMBOL_CHECK: {
// Check that the object is a symbol.
- __ CmpObjectType(edx, SYMBOL_TYPE, eax);
- __ j(not_equal, &miss);
+ __ CmpObjectType(reg, SYMBOL_TYPE, eax);
+ __ j(not_equal, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::SYMBOL_FUNCTION_INDEX, eax, miss);
break;
-
+ }
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
+ __ JumpIfSmi(reg, &fast);
+ __ CmpObjectType(reg, HEAP_NUMBER_TYPE, eax);
+ __ j(not_equal, miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax, miss);
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
+ GenerateBooleanCheck(reg, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, miss);
break;
}
}
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
+ if (check != RECEIVER_MAP_CHECK) {
+ Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(prototype, isolate()),
+ eax, holder, ebx, edx, edi, name, miss);
+ }
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ return reg;
}
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Register function,
+ Label* miss) {
+ // Check that the function really is a function.
+ GenerateFunctionCheck(function, ebx, miss);
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
+ if (!function.is(edi)) __ mov(edi, function);
+ PatchGlobalProxy(object);
- // Return the generated code.
- return GetCode(function);
+ // Invoke the function.
+ __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind());
}
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
@@ -2778,39 +2591,19 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
+ CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state());
compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
&miss);
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Check that the function really is a function.
- __ JumpIfSmi(eax, &miss);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
+ GenerateJumpFunction(object, eax, &miss);
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ mov(edi, eax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(Code::FAST, name);
}
@@ -2820,14 +2613,6 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
object, holder, cell, function, Handle<String>::cast(name),
@@ -2837,40 +2622,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ // Potentially loads a closure that matches the shared function info of the
+ // function, rather than the function itself.
GenerateLoadFunctionFromCell(cell, function, &miss);
+ GenerateJumpFunction(object, edi, function);
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Set up the context (function already in edi).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::NORMAL, name);
@@ -2882,9 +2640,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -2899,7 +2656,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2908,16 +2665,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(), 1, values);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), this->name(), 1, values);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2975,16 +2732,15 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ push(receiver());
__ push(this->name());
__ push(value());
- __ push(Immediate(Smi::FromInt(strict_mode())));
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -3016,23 +2772,18 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ mov(eax, isolate()->factory()->undefined_value());
__ ret(0);
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -3085,18 +2836,14 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
@@ -3119,16 +2866,14 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<Type> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
if (Serializer::enabled()) {
__ mov(eax, Immediate(cell));
@@ -3146,8 +2891,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
+ HandlerFrontendFooter(name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
@@ -3155,12 +2899,12 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ ret(0);
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -3171,16 +2915,24 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
GenerateNameCheck(name, this->name(), &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<Type> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ cmp(map_reg, map);
+ if (type->Is(Type::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ j(equal, handlers->at(current));
}
}
@@ -3207,11 +2959,11 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3234,13 +2986,13 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}